--- /dev/null
+From 401b9a9a678be30083ff7f68fb1ab3fb898d50b4 Mon Sep 17 00:00:00 2001
+From: Hannes Reinecke <hare@suse.de>
+Date: Wed, 17 Sep 2008 16:35:05 +0200
+Subject: [PATCH] ixgbe: Bugfixes for FCoE
+
+Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
+Signed-off-by: Hannes Reinecke <hare@suse.de>
+---
+ drivers/net/Kconfig | 11
+ drivers/net/ixgbe/ixgbe.h | 113 +-
+ drivers/net/ixgbe/ixgbe_82598.c | 628 +++++++++---
+ drivers/net/ixgbe/ixgbe_common.c | 1064 ++++++++++++++------
+ drivers/net/ixgbe/ixgbe_common.h | 60 -
+ drivers/net/ixgbe/ixgbe_ethtool.c | 307 +++--
+ drivers/net/ixgbe/ixgbe_main.c | 1981 +++++++++++++++++++++-----------------
+ drivers/net/ixgbe/ixgbe_phy.c | 248 +---
+ drivers/net/ixgbe/ixgbe_phy.h | 63 -
+ drivers/net/ixgbe/ixgbe_type.h | 559 ++++++----
+ 10 files changed, 3159 insertions(+), 1875 deletions(-)
+
+--- a/drivers/net/ixgbe/ixgbe_82598.c
++++ b/drivers/net/ixgbe/ixgbe_82598.c
+@@ -1,7 +1,7 @@
+ /*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+- Copyright(c) 1999 - 2007 Intel Corporation.
++ Copyright(c) 1999 - 2008 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+@@ -20,7 +20,6 @@
+ the file called "COPYING".
+
+ Contact Information:
+- Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+@@ -36,67 +35,62 @@
+ #define IXGBE_82598_MAX_TX_QUEUES 32
+ #define IXGBE_82598_MAX_RX_QUEUES 64
+ #define IXGBE_82598_RAR_ENTRIES 16
++#define IXGBE_82598_MC_TBL_SIZE 128
++#define IXGBE_82598_VFT_TBL_SIZE 128
+
+-static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw);
+-static s32 ixgbe_get_link_settings_82598(struct ixgbe_hw *hw, u32 *speed,
+- bool *autoneg);
+-static s32 ixgbe_get_copper_link_settings_82598(struct ixgbe_hw *hw,
+- u32 *speed, bool *autoneg);
+-static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
+-static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw);
+-static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, u32 *speed,
+- bool *link_up);
+-static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw, u32 speed,
+- bool autoneg,
+- bool autoneg_wait_to_complete);
++static s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
++ ixgbe_link_speed *speed,
++ bool *autoneg);
+ static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw);
+-static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw, u32 speed,
+- bool autoneg,
+- bool autoneg_wait_to_complete);
+-static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
+-
++static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw,
++ ixgbe_link_speed speed,
++ bool autoneg,
++ bool autoneg_wait_to_complete);
+
++/**
++ * ixgbe_get_invariants_82598 - Get invariant function pointers
++ * @hw: pointer to hardware structure
++ *
++ * Identifies the PHY and initializes the function pointers and
++ * MAC parameters for the 82598.
++ **/
+ static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
+ {
+- hw->mac.num_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
+- hw->mac.num_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
+- hw->mac.num_rx_addrs = IXGBE_82598_RAR_ENTRIES;
+-
+- /* PHY ops are filled in by default properly for Fiber only */
+- if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
+- hw->mac.ops.setup_link = &ixgbe_setup_copper_link_82598;
+- hw->mac.ops.setup_link_speed = &ixgbe_setup_copper_link_speed_82598;
+- hw->mac.ops.get_link_settings =
+- &ixgbe_get_copper_link_settings_82598;
+-
+- /* Call PHY identify routine to get the phy type */
+- ixgbe_identify_phy(hw);
+-
+- switch (hw->phy.type) {
+- case ixgbe_phy_tn:
+- hw->phy.ops.setup_link = &ixgbe_setup_tnx_phy_link;
+- hw->phy.ops.check_link = &ixgbe_check_tnx_phy_link;
+- hw->phy.ops.setup_link_speed =
+- &ixgbe_setup_tnx_phy_link_speed;
+- break;
+- default:
+- break;
+- }
++ struct ixgbe_mac_info *mac = &hw->mac;
++ struct ixgbe_phy_info *phy = &hw->phy;
++
++ /* Call PHY identify routine to get the phy type */
++ ixgbe_identify_phy_generic(hw);
++
++ /* PHY Init */
++ switch (phy->type) {
++ default:
++ break;
+ }
+
++ if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
++ mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
++ mac->ops.setup_link_speed =
++ &ixgbe_setup_copper_link_speed_82598;
++ mac->ops.get_link_capabilities =
++ &ixgbe_get_copper_link_capabilities_82598;
++ }
++
++ mac->mcft_size = IXGBE_82598_MC_TBL_SIZE;
++ mac->vft_size = IXGBE_82598_VFT_TBL_SIZE;
++ mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES;
++ mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
++ mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
++
+ return 0;
+ }
+
+ /**
+- * ixgbe_get_link_settings_82598 - Determines default link settings
++ * ixgbe_get_link_capabilities_82598 - Determines link capabilities
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @autoneg: boolean auto-negotiation value
+ *
+- * Determines the default link settings by reading the AUTOC register.
++ * Determines the link capabilities by reading the AUTOC register.
+ **/
+-static s32 ixgbe_get_link_settings_82598(struct ixgbe_hw *hw, u32 *speed,
+- bool *autoneg)
++static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
++ ixgbe_link_speed *speed,
++ bool *autoneg)
+ {
+ s32 status = 0;
+ s32 autoc_reg;
+@@ -145,15 +139,16 @@ static s32 ixgbe_get_link_settings_82598
+ }
+
+ /**
+- * ixgbe_get_copper_link_settings_82598 - Determines default link settings
++ * ixgbe_get_copper_link_capabilities_82598 - Determines link capabilities
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @autoneg: boolean auto-negotiation value
+ *
+- * Determines the default link settings by reading the AUTOC register.
++ * Determines the link capabilities by reading the AUTOC register.
+ **/
+-static s32 ixgbe_get_copper_link_settings_82598(struct ixgbe_hw *hw,
+- u32 *speed, bool *autoneg)
++static s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
++                                                    ixgbe_link_speed *speed,
++                                                    bool *autoneg)
+ {
+ s32 status = IXGBE_ERR_LINK_SETUP;
+ u16 speed_ability;
+@@ -161,9 +156,9 @@ static s32 ixgbe_get_copper_link_setting
+ *speed = 0;
+ *autoneg = true;
+
+- status = ixgbe_read_phy_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY,
+- IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+- &speed_ability);
++ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY,
++ IXGBE_MDIO_PMA_PMD_DEV_TYPE,
++ &speed_ability);
+
+ if (status == 0) {
+ if (speed_ability & IXGBE_MDIO_PHY_SPEED_10G)
+@@ -191,11 +186,9 @@ static enum ixgbe_media_type ixgbe_get_m
+ case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
+ case IXGBE_DEV_ID_82598EB_CX4:
+ case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
++ case IXGBE_DEV_ID_82598EB_XF_LR:
+ media_type = ixgbe_media_type_fiber;
+ break;
+- case IXGBE_DEV_ID_82598AT_DUAL_PORT:
+- media_type = ixgbe_media_type_copper;
+- break;
+ default:
+ media_type = ixgbe_media_type_unknown;
+ break;
+@@ -205,6 +198,122 @@ static enum ixgbe_media_type ixgbe_get_m
+ }
+
+ /**
++ * ixgbe_setup_fc_82598 - Configure flow control settings
++ * @hw: pointer to hardware structure
++ * @packetbuf_num: packet buffer number (0-7)
++ *
++ * Configures the flow control settings based on SW configuration. This
++ * function is used for 802.3x flow control configuration only.
++ **/
++s32 ixgbe_setup_fc_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
++{
++ u32 frctl_reg;
++ u32 rmcs_reg;
++
++ if (packetbuf_num < 0 || packetbuf_num > 7) {
++ hw_dbg(hw, "Invalid packet buffer number [%d], expected range is"
++ " 0-7\n", packetbuf_num);
++ }
++
++ frctl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
++ frctl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
++
++ rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
++ rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);
++
++ /*
++ * 10 gig parts do not have a word in the EEPROM to determine the
++ * default flow control setting, so we explicitly set it to full.
++ */
++ if (hw->fc.type == ixgbe_fc_default)
++ hw->fc.type = ixgbe_fc_full;
++
++ /*
++ * We want to save off the original Flow Control configuration just in
++ * case we get disconnected and then reconnected into a different hub
++ * or switch with different Flow Control capabilities.
++ */
++ hw->fc.original_type = hw->fc.type;
++
++ /*
++ * The possible values of the "flow_control" parameter are:
++ * 0: Flow control is completely disabled
++ * 1: Rx flow control is enabled (we can receive pause frames but not
++ * send pause frames).
++ * 2: Tx flow control is enabled (we can send pause frames but we do not
++ * support receiving pause frames)
++ * 3: Both Rx and Tx flow control (symmetric) are enabled.
++ * other: Invalid.
++ */
++ switch (hw->fc.type) {
++ case ixgbe_fc_none:
++ break;
++ case ixgbe_fc_rx_pause:
++ /*
++ * Rx Flow control is enabled,
++ * and Tx Flow control is disabled.
++ */
++ frctl_reg |= IXGBE_FCTRL_RFCE;
++ break;
++ case ixgbe_fc_tx_pause:
++ /*
++ * Tx Flow control is enabled, and Rx Flow control is disabled,
++ * by a software over-ride.
++ */
++ rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
++ break;
++ case ixgbe_fc_full:
++ /*
++ * Flow control (both Rx and Tx) is enabled by a software
++ * over-ride.
++ */
++ frctl_reg |= IXGBE_FCTRL_RFCE;
++ rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
++ break;
++ default:
++ /* We should never get here. The value should be 0-3. */
++ hw_dbg(hw, "Flow control param set incorrectly\n");
++ break;
++ }
++
++ /* Enable 802.3x based flow control settings. */
++ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, frctl_reg);
++ IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
++
++ /*
++ * Check for invalid software configuration, zeros are completely
++ * invalid for all parameters used past this point, and if we enable
++ * flow control with zero water marks, we blast flow control packets.
++ */
++ if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) {
++ hw_dbg(hw, "Flow control structure initialized incorrectly\n");
++ return IXGBE_ERR_INVALID_LINK_SETTINGS;
++ }
++
++ /*
++ * We need to set up the Receive Threshold high and low water
++ * marks as well as (optionally) enabling the transmission of
++ * XON frames.
++ */
++ if (hw->fc.type & ixgbe_fc_tx_pause) {
++ if (hw->fc.send_xon) {
++ IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
++ (hw->fc.low_water | IXGBE_FCRTL_XONE));
++ } else {
++ IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
++ hw->fc.low_water);
++ }
++ IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num),
++		                (hw->fc.high_water | IXGBE_FCRTH_FCEN));
++ }
++
++ IXGBE_WRITE_REG(hw, IXGBE_FCTTV(0), hw->fc.pause_time);
++ IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));
++
++ return 0;
++}
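++
++/*
++ * Usage sketch with illustrative values (the watermarks and pause time
++ * below are examples, not driver defaults):
++ *
++ *	hw->fc.type       = ixgbe_fc_full;
++ *	hw->fc.high_water = 0x20000;
++ *	hw->fc.low_water  = 0x10000;
++ *	hw->fc.pause_time = 0x680;
++ *	hw->fc.send_xon   = true;
++ *	ixgbe_setup_fc_82598(hw, 0);
++ */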
++
++/**
+ * ixgbe_setup_mac_link_82598 - Configures MAC link settings
+ * @hw: pointer to hardware structure
+ *
+@@ -248,8 +357,7 @@ static s32 ixgbe_setup_mac_link_82598(st
+ }
+ if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
+ status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
+- hw_dbg(hw,
+- "Autonegotiation did not complete.\n");
++ hw_dbg(hw, "Autonegotiation did not complete.\n");
+ }
+ }
+ }
+@@ -259,8 +367,8 @@ static s32 ixgbe_setup_mac_link_82598(st
+ * case we get disconnected and then reconnected into a different hub
+ * or switch with different Flow Control capabilities.
+ */
+- hw->fc.type = hw->fc.original_type;
+- ixgbe_setup_fc(hw, 0);
++ hw->fc.original_type = hw->fc.type;
++ ixgbe_setup_fc_82598(hw, 0);
+
+ /* Add delay to filter out noises during initial link setup */
+ msleep(50);
+@@ -273,20 +381,35 @@ static s32 ixgbe_setup_mac_link_82598(st
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: true if link is up, false otherwise
++ * @link_up_wait_to_complete: bool used to wait for link up or not
+ *
+ * Reads the links register to determine if link is up and the current speed
+ **/
+-static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, u32 *speed,
+- bool *link_up)
++static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
++ ixgbe_link_speed *speed, bool *link_up,
++ bool link_up_wait_to_complete)
+ {
+ u32 links_reg;
++ u32 i;
+
+ links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+-
+- if (links_reg & IXGBE_LINKS_UP)
+- *link_up = true;
+- else
+- *link_up = false;
++ if (link_up_wait_to_complete) {
++ for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
++ if (links_reg & IXGBE_LINKS_UP) {
++ *link_up = true;
++ break;
++ } else {
++ *link_up = false;
++ }
++ msleep(100);
++ links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
++ }
++ } else {
++ if (links_reg & IXGBE_LINKS_UP)
++ *link_up = true;
++ else
++ *link_up = false;
++ }
+
+ if (links_reg & IXGBE_LINKS_SPEED)
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+@@ -296,6 +419,7 @@ static s32 ixgbe_check_mac_link_82598(st
+ return 0;
+ }
+
++
+ /**
+ * ixgbe_setup_mac_link_speed_82598 - Set MAC link speed
+ * @hw: pointer to hardware structure
+@@ -306,18 +430,18 @@ static s32 ixgbe_check_mac_link_82598(st
+ * Set the link speed in the AUTOC register and restarts link.
+ **/
+ static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw,
+- u32 speed, bool autoneg,
+- bool autoneg_wait_to_complete)
++ ixgbe_link_speed speed, bool autoneg,
++ bool autoneg_wait_to_complete)
+ {
+ s32 status = 0;
+
+ /* If speed is 10G, then check for CX4 or XAUI. */
+ if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
+- (!(hw->mac.link_attach_type & IXGBE_AUTOC_10G_KX4)))
++ (!(hw->mac.link_attach_type & IXGBE_AUTOC_10G_KX4))) {
+ hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN;
+- else if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && (!autoneg))
++ } else if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && (!autoneg)) {
+ hw->mac.link_mode_select = IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
+- else if (autoneg) {
++ } else if (autoneg) {
+ /* BX mode - Autonegotiate 1G */
+ if (!(hw->mac.link_attach_type & IXGBE_AUTOC_1G_PMA_PMD))
+ hw->mac.link_mode_select = IXGBE_AUTOC_LMS_1G_AN;
+@@ -336,7 +460,7 @@ static s32 ixgbe_setup_mac_link_speed_82
+ * ixgbe_hw This will write the AUTOC register based on the new
+ * stored values
+ */
+- hw->mac.ops.setup_link(hw);
++ ixgbe_setup_mac_link_82598(hw);
+ }
+
+ return status;
+@@ -354,18 +478,17 @@ static s32 ixgbe_setup_mac_link_speed_82
+ **/
+ static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw)
+ {
+- s32 status = 0;
++ s32 status;
+
+ /* Restart autonegotiation on PHY */
+- if (hw->phy.ops.setup_link)
+- status = hw->phy.ops.setup_link(hw);
++ status = hw->phy.ops.setup_link(hw);
+
+- /* Set MAC to KX/KX4 autoneg, which defaultis to Parallel detection */
++ /* Set MAC to KX/KX4 autoneg, which defaults to Parallel detection */
+ hw->mac.link_attach_type = (IXGBE_AUTOC_10G_KX4 | IXGBE_AUTOC_1G_KX);
+ hw->mac.link_mode_select = IXGBE_AUTOC_LMS_KX4_AN;
+
+ /* Set up MAC */
+- hw->mac.ops.setup_link(hw);
++ ixgbe_setup_mac_link_82598(hw);
+
+ return status;
+ }
+@@ -379,23 +502,23 @@ static s32 ixgbe_setup_copper_link_82598
+ *
+ * Sets the link speed in the AUTOC register in the MAC and restarts link.
+ **/
+-static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw, u32 speed,
+- bool autoneg,
+- bool autoneg_wait_to_complete)
++static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw,
++ ixgbe_link_speed speed,
++ bool autoneg,
++ bool autoneg_wait_to_complete)
+ {
+- s32 status = 0;
++ s32 status;
+
+ /* Setup the PHY according to input speed */
+- if (hw->phy.ops.setup_link_speed)
+- status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
+- autoneg_wait_to_complete);
++ status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
++ autoneg_wait_to_complete);
+
+ /* Set MAC to KX/KX4 autoneg, which defaults to Parallel detection */
+ hw->mac.link_attach_type = (IXGBE_AUTOC_10G_KX4 | IXGBE_AUTOC_1G_KX);
+ hw->mac.link_mode_select = IXGBE_AUTOC_LMS_KX4_AN;
+
+ /* Set up MAC */
+- hw->mac.ops.setup_link(hw);
++ ixgbe_setup_mac_link_82598(hw);
+
+ return status;
+ }
+@@ -404,7 +527,7 @@ static s32 ixgbe_setup_copper_link_speed
+ * ixgbe_reset_hw_82598 - Performs hardware reset
+ * @hw: pointer to hardware structure
+ *
+- * Resets the hardware by reseting the transmit and receive units, masks and
++ * Resets the hardware by resetting the transmit and receive units, masks and
+ * clears all interrupts, performing a PHY reset, and performing a link (MAC)
+ * reset.
+ **/
+@@ -418,35 +541,44 @@ static s32 ixgbe_reset_hw_82598(struct i
+ u8 analog_val;
+
+ /* Call adapter stop to disable tx/rx and clear interrupts */
+- ixgbe_stop_adapter(hw);
++ hw->mac.ops.stop_adapter(hw);
+
+ /*
+- * Power up the Atlas TX lanes if they are currently powered down.
+- * Atlas TX lanes are powered down for MAC loopback tests, but
++ * Power up the Atlas Tx lanes if they are currently powered down.
++ * Atlas Tx lanes are powered down for MAC loopback tests, but
+ * they are not automatically restored on reset.
+ */
+- ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
++ hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
+ if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
+- /* Enable TX Atlas so packets can be transmitted again */
+- ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
++ /* Enable Tx Atlas so packets can be transmitted again */
++ hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
++ &analog_val);
+ analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
+- ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, analog_val);
++ hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
++ analog_val);
+
+- ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &analog_val);
++ hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
++ &analog_val);
+ analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
+- ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, analog_val);
++ hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
++ analog_val);
+
+- ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &analog_val);
++ hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
++ &analog_val);
+ analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
+- ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, analog_val);
++ hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
++ analog_val);
+
+- ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &analog_val);
++ hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
++ &analog_val);
+ analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
+- ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, analog_val);
++ hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
++ analog_val);
+ }
+
+ /* Reset PHY */
+- ixgbe_reset_phy(hw);
++	if (!hw->phy.reset_disable)
++ hw->phy.ops.reset(hw);
+
+ /*
+ 	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
+@@ -499,29 +631,311 @@ static s32 ixgbe_reset_hw_82598(struct i
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
+ } else {
+ hw->mac.link_attach_type =
+- (autoc & IXGBE_AUTOC_LMS_ATTACH_TYPE);
++ (autoc & IXGBE_AUTOC_LMS_ATTACH_TYPE);
+ hw->mac.link_mode_select = (autoc & IXGBE_AUTOC_LMS_MASK);
+ hw->mac.link_settings_loaded = true;
+ }
+
+ /* Store the permanent mac address */
+- ixgbe_get_mac_addr(hw, hw->mac.perm_addr);
++ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
+ return status;
+ }
+
++/**
++ * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
++ * @hw: pointer to hardware struct
++ * @rar: receive address register index to associate with a VMDq index
++ * @vmdq: VMDq set index
++ **/
++s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
++{
++ u32 rar_high;
++
++ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
++ rar_high &= ~IXGBE_RAH_VIND_MASK;
++ rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
++ IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
++ return 0;
++}
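++
++/*
++ * For example, hw->mac.ops.set_vmdq(hw, 0, 2) stores pool index 2 in
++ * the VIND field of RAH[0]; the address bits of the RAR are untouched.
++ */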
++
++/**
++ * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
++ * @hw: pointer to hardware struct
++ * @rar: receive address register index to associate with a VMDq index
++ * @vmdq: VMDq clear index (not used in 82598, but elsewhere)
++ **/
++static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
++{
++ u32 rar_high;
++ u32 rar_entries = hw->mac.num_rar_entries;
++
++ if (rar < rar_entries) {
++ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
++ if (rar_high & IXGBE_RAH_VIND_MASK) {
++ rar_high &= ~IXGBE_RAH_VIND_MASK;
++ IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
++ }
++ } else {
++ hw_dbg(hw, "RAR index %d is out of range.\n", rar);
++ }
++
++ return 0;
++}
++
++/**
++ * ixgbe_set_vfta_82598 - Set VLAN filter table
++ * @hw: pointer to hardware structure
++ * @vlan: VLAN id to write to VLAN filter
++ * @vind: VMDq output index that maps queue to VLAN id in VFTA
++ * @vlan_on: boolean flag to turn on/off VLAN in VFTA
++ *
++ * Turn on/off specified VLAN in the VLAN filter table.
++ **/
++s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
++ bool vlan_on)
++{
++ u32 regindex;
++ u32 bitindex;
++ u32 bits;
++ u32 vftabyte;
++
++ if (vlan > 4095)
++ return IXGBE_ERR_PARAM;
++
++ /* Determine 32-bit word position in array */
++ regindex = (vlan >> 5) & 0x7F; /* upper seven bits */
++
++ /* Determine the location of the (VMD) queue index */
++ vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
++ bitindex = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */
++
++ /* Set the nibble for VMD queue index */
++ bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
++ bits &= (~(0x0F << bitindex));
++ bits |= (vind << bitindex);
++ IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);
++
++ /* Determine the location of the bit for this VLAN id */
++ bitindex = vlan & 0x1F; /* lower five bits */
++
++ bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
++ if (vlan_on)
++ /* Turn on this VLAN id */
++ bits |= (1 << bitindex);
++ else
++ /* Turn off this VLAN id */
++ bits &= ~(1 << bitindex);
++ IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
++
++ return 0;
++}
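++
++/*
++ * Worked example: for VLAN id 100, regindex = 100 >> 5 = 3 and
++ * bitindex = 100 & 0x1F = 4, so the valid bit lives in VFTA[3] bit 4.
++ * The VMDq nibble sits in byte array 0 (vftabyte = (100 >> 3) & 3) at
++ * bits 19:16 of that register (bitindex = (100 & 7) << 2 = 16).
++ */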
++
++/**
++ * ixgbe_clear_vfta_82598 - Clear VLAN filter table
++ * @hw: pointer to hardware structure
++ *
++ * Clears the VLAN filter table, and the VMDq index associated with the filter
++ **/
++static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
++{
++ u32 offset;
++ u32 vlanbyte;
++
++ for (offset = 0; offset < hw->mac.vft_size; offset++)
++ IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
++
++ for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
++ for (offset = 0; offset < hw->mac.vft_size; offset++)
++ IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
++ 0);
++
++ return 0;
++}
++
++/**
++ * ixgbe_blink_led_start_82598 - Blink LED based on index.
++ * @hw: pointer to hardware structure
++ * @index: led number to blink
++ **/
++static s32 ixgbe_blink_led_start_82598(struct ixgbe_hw *hw, u32 index)
++{
++ ixgbe_link_speed speed = 0;
++	bool link_up = false;
++ u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
++ u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
++
++ /*
++ * Link must be up to auto-blink the LEDs on the 82598EB MAC;
++ * force it if link is down.
++ */
++ hw->mac.ops.check_link(hw, &speed, &link_up, false);
++
++ if (!link_up) {
++ autoc_reg |= IXGBE_AUTOC_FLU;
++ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
++ msleep(10);
++ }
++
++ led_reg &= ~IXGBE_LED_MODE_MASK(index);
++ led_reg |= IXGBE_LED_BLINK(index);
++ IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
++ IXGBE_WRITE_FLUSH(hw);
++
++ return 0;
++}
++
++/**
++ * ixgbe_blink_led_stop_82598 - Stop blinking LED based on index.
++ * @hw: pointer to hardware structure
++ * @index: led number to stop blinking
++ **/
++static s32 ixgbe_blink_led_stop_82598(struct ixgbe_hw *hw, u32 index)
++{
++ u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
++ u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
++
++ autoc_reg &= ~IXGBE_AUTOC_FLU;
++ autoc_reg |= IXGBE_AUTOC_AN_RESTART;
++ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
++
++ led_reg &= ~IXGBE_LED_MODE_MASK(index);
++ led_reg &= ~IXGBE_LED_BLINK(index);
++ led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
++ IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
++ IXGBE_WRITE_FLUSH(hw);
++
++ return 0;
++}
++
++/**
++ * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
++ * @hw: pointer to hardware structure
++ * @reg: analog register to read
++ * @val: read value
++ *
++ * Performs read operation to Atlas analog register specified.
++ **/
++s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
++{
++ u32 atlas_ctl;
++
++ IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
++ IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
++ IXGBE_WRITE_FLUSH(hw);
++ udelay(10);
++ atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
++ *val = (u8)atlas_ctl;
++
++ return 0;
++}
++
++/**
++ * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
++ * @hw: pointer to hardware structure
++ * @reg: atlas register to write
++ * @val: value to write
++ *
++ * Performs write operation to Atlas analog register specified.
++ **/
++s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
++{
++ u32 atlas_ctl;
++
++ atlas_ctl = (reg << 8) | val;
++ IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
++ IXGBE_WRITE_FLUSH(hw);
++ udelay(10);
++
++ return 0;
++}
++
++/**
++ * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
++ * @hw: pointer to hardware structure
++ *
++ * Determines physical layer capabilities of the current configuration.
++ **/
++s32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
++{
++ s32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
++
++ switch (hw->device_id) {
++ case IXGBE_DEV_ID_82598EB_CX4:
++ case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
++ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
++ break;
++ case IXGBE_DEV_ID_82598AF_DUAL_PORT:
++ case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
++ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
++ break;
++ case IXGBE_DEV_ID_82598EB_XF_LR:
++ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
++ break;
++
++ default:
++ physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
++ break;
++ }
++
++ return physical_layer;
++}
++
+ static struct ixgbe_mac_operations mac_ops_82598 = {
+- .reset = &ixgbe_reset_hw_82598,
++ .init_hw = &ixgbe_init_hw_generic,
++ .reset_hw = &ixgbe_reset_hw_82598,
++ .start_hw = &ixgbe_start_hw_generic,
++ .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic,
+ .get_media_type = &ixgbe_get_media_type_82598,
++ .get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82598,
++ .get_mac_addr = &ixgbe_get_mac_addr_generic,
++ .stop_adapter = &ixgbe_stop_adapter_generic,
++ .read_analog_reg8 = &ixgbe_read_analog_reg8_82598,
++ .write_analog_reg8 = &ixgbe_write_analog_reg8_82598,
+ .setup_link = &ixgbe_setup_mac_link_82598,
+- .check_link = &ixgbe_check_mac_link_82598,
+ .setup_link_speed = &ixgbe_setup_mac_link_speed_82598,
+- .get_link_settings = &ixgbe_get_link_settings_82598,
++ .check_link = &ixgbe_check_mac_link_82598,
++ .get_link_capabilities = &ixgbe_get_link_capabilities_82598,
++ .led_on = &ixgbe_led_on_generic,
++ .led_off = &ixgbe_led_off_generic,
++ .blink_led_start = &ixgbe_blink_led_start_82598,
++ .blink_led_stop = &ixgbe_blink_led_stop_82598,
++ .set_rar = &ixgbe_set_rar_generic,
++ .clear_rar = &ixgbe_clear_rar_generic,
++ .set_vmdq = &ixgbe_set_vmdq_82598,
++ .clear_vmdq = &ixgbe_clear_vmdq_82598,
++ .init_rx_addrs = &ixgbe_init_rx_addrs_generic,
++ .update_uc_addr_list = &ixgbe_update_uc_addr_list_generic,
++ .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
++ .enable_mc = &ixgbe_enable_mc_generic,
++ .disable_mc = &ixgbe_disable_mc_generic,
++ .clear_vfta = &ixgbe_clear_vfta_82598,
++ .set_vfta = &ixgbe_set_vfta_82598,
++ .setup_fc = &ixgbe_setup_fc_82598,
++};
++
++static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
++ .init_params = &ixgbe_init_eeprom_params_generic,
++ .read = &ixgbe_read_eeprom_generic,
++ .validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
++ .update_checksum = &ixgbe_update_eeprom_checksum_generic,
++};
++
++static struct ixgbe_phy_operations phy_ops_82598 = {
++ .identify = &ixgbe_identify_phy_generic,
++ /* .identify_sfp = &ixgbe_identify_sfp_module_generic, */
++ .reset = &ixgbe_reset_phy_generic,
++ .read_reg = &ixgbe_read_phy_reg_generic,
++ .write_reg = &ixgbe_write_phy_reg_generic,
++ .setup_link = &ixgbe_setup_phy_link_generic,
++ .setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
+ };
+
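++/*
++ * All 82598 entry points are reached through the ops tables above, e.g.
++ * hw->mac.ops.reset_hw(hw) or hw->eeprom.ops.read(hw, offset, &data),
++ * which lets generic and device-specific implementations be mixed on a
++ * per-function basis.
++ */
++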
+ struct ixgbe_info ixgbe_82598_info = {
+ .mac = ixgbe_mac_82598EB,
+ .get_invariants = &ixgbe_get_invariants_82598,
+ .mac_ops = &mac_ops_82598,
++ .eeprom_ops = &eeprom_ops_82598,
++ .phy_ops = &phy_ops_82598,
+ };
+
+--- a/drivers/net/ixgbe/ixgbe_common.c
++++ b/drivers/net/ixgbe/ixgbe_common.c
+@@ -1,7 +1,7 @@
+ /*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+- Copyright(c) 1999 - 2007 Intel Corporation.
++ Copyright(c) 1999 - 2008 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+@@ -20,7 +20,6 @@
+ the file called "COPYING".
+
+ Contact Information:
+- Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+@@ -33,20 +32,28 @@
+ #include "ixgbe_common.h"
+ #include "ixgbe_phy.h"
+
+-static s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw);
+-
+ static s32 ixgbe_poll_eeprom_eerd_done(struct ixgbe_hw *hw);
++static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
+ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
+ static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
++static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
++static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
++static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
++ u16 count);
++static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
++static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
++static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
++static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
+ static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw);
+
+-static s32 ixgbe_clear_vfta(struct ixgbe_hw *hw);
+-static s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw);
++static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index);
++static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index);
+ static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
+ static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr);
++static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
+
+ /**
+- * ixgbe_start_hw - Prepare hardware for TX/RX
++ * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * Starts the hardware by filling the bus info structure and media type, clears
+@@ -54,7 +61,7 @@ static void ixgbe_add_mc_addr(struct ixg
+ * table, VLAN filter table, calls routine to set up link and flow control
+ * settings, and leaves transmit and receive units disabled and uninitialized
+ **/
+-s32 ixgbe_start_hw(struct ixgbe_hw *hw)
++s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
+ {
+ u32 ctrl_ext;
+
+@@ -62,22 +69,22 @@ s32 ixgbe_start_hw(struct ixgbe_hw *hw)
+ hw->phy.media_type = hw->mac.ops.get_media_type(hw);
+
+ /* Identify the PHY */
+- ixgbe_identify_phy(hw);
++ hw->phy.ops.identify(hw);
+
+ /*
+ * Store MAC address from RAR0, clear receive address registers, and
+ * clear the multicast table
+ */
+- ixgbe_init_rx_addrs(hw);
++ hw->mac.ops.init_rx_addrs(hw);
+
+ /* Clear the VLAN filter table */
+- ixgbe_clear_vfta(hw);
++ hw->mac.ops.clear_vfta(hw);
+
+ /* Set up link */
+ hw->mac.ops.setup_link(hw);
+
+ /* Clear statistics registers */
+- ixgbe_clear_hw_cntrs(hw);
++ hw->mac.ops.clear_hw_cntrs(hw);
+
+ /* Set No Snoop Disable */
+ ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+@@ -92,34 +99,34 @@ s32 ixgbe_start_hw(struct ixgbe_hw *hw)
+ }
+
+ /**
+- * ixgbe_init_hw - Generic hardware initialization
++ * ixgbe_init_hw_generic - Generic hardware initialization
+ * @hw: pointer to hardware structure
+ *
+- * Initialize the hardware by reseting the hardware, filling the bus info
++ * Initialize the hardware by resetting the hardware, filling the bus info
+ * structure and media type, clears all on chip counters, initializes receive
+ * address registers, multicast table, VLAN filter table, calls routine to set
+ * up link and flow control settings, and leaves transmit and receive units
+ * disabled and uninitialized
+ **/
+-s32 ixgbe_init_hw(struct ixgbe_hw *hw)
++s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
+ {
+ /* Reset the hardware */
+- hw->mac.ops.reset(hw);
++ hw->mac.ops.reset_hw(hw);
+
+ /* Start the HW */
+- ixgbe_start_hw(hw);
++ hw->mac.ops.start_hw(hw);
+
+ return 0;
+ }
+
+ /**
+- * ixgbe_clear_hw_cntrs - Generic clear hardware counters
++ * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
+ * @hw: pointer to hardware structure
+ *
+ * Clears all hardware statistics counters by reading them from the hardware
+ * Statistics counters are clear on read.
+ **/
+-static s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw)
++s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
+ {
+ u16 i = 0;
+
+@@ -191,7 +198,36 @@ static s32 ixgbe_clear_hw_cntrs(struct i
+ }
+
+ /**
+- * ixgbe_get_mac_addr - Generic get MAC address
++ * ixgbe_read_pba_num_generic - Reads part number from EEPROM
++ * @hw: pointer to hardware structure
++ * @pba_num: stores the part number from the EEPROM
++ *
++ * Reads the part number from the EEPROM.
++ **/
++s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
++{
++ s32 ret_val;
++ u16 data;
++
++ ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
++ if (ret_val) {
++ hw_dbg(hw, "NVM Read Error\n");
++ return ret_val;
++ }
++ *pba_num = (u32)(data << 16);
++
++ ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
++ if (ret_val) {
++ hw_dbg(hw, "NVM Read Error\n");
++ return ret_val;
++ }
++ *pba_num |= data;
++
++ return 0;
++}
++
++/**
++ * ixgbe_get_mac_addr_generic - Generic get MAC address
+ * @hw: pointer to hardware structure
+ * @mac_addr: Adapter MAC address
+ *
+@@ -199,7 +235,7 @@ static s32 ixgbe_clear_hw_cntrs(struct i
+ * A reset of the adapter must be performed prior to calling this function
+ * in order for the MAC address to have been loaded from the EEPROM into RAR0
+ **/
+-s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr)
++s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
+ {
+ u32 rar_high;
+ u32 rar_low;
+@@ -217,30 +253,8 @@ s32 ixgbe_get_mac_addr(struct ixgbe_hw *
+ return 0;
+ }
+
+-s32 ixgbe_read_part_num(struct ixgbe_hw *hw, u32 *part_num)
+-{
+- s32 ret_val;
+- u16 data;
+-
+- ret_val = ixgbe_read_eeprom(hw, IXGBE_PBANUM0_PTR, &data);
+- if (ret_val) {
+- hw_dbg(hw, "NVM Read Error\n");
+- return ret_val;
+- }
+- *part_num = (u32)(data << 16);
+-
+- ret_val = ixgbe_read_eeprom(hw, IXGBE_PBANUM1_PTR, &data);
+- if (ret_val) {
+- hw_dbg(hw, "NVM Read Error\n");
+- return ret_val;
+- }
+- *part_num |= data;
+-
+- return 0;
+-}
+-
+ /**
+- * ixgbe_stop_adapter - Generic stop TX/RX units
++ * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
+ * @hw: pointer to hardware structure
+ *
+ * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
+@@ -248,7 +262,7 @@ s32 ixgbe_read_part_num(struct ixgbe_hw
+ * the shared code and drivers to determine if the adapter is in a stopped
+ * state and should not touch the hardware.
+ **/
+-s32 ixgbe_stop_adapter(struct ixgbe_hw *hw)
++s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
+ {
+ u32 number_of_queues;
+ u32 reg_val;
+@@ -264,6 +278,7 @@ s32 ixgbe_stop_adapter(struct ixgbe_hw *
+ reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ reg_val &= ~(IXGBE_RXCTRL_RXEN);
+ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val);
++ IXGBE_WRITE_FLUSH(hw);
+ msleep(2);
+
+ /* Clear interrupt mask to stop from interrupts being generated */
+@@ -273,7 +288,7 @@ s32 ixgbe_stop_adapter(struct ixgbe_hw *
+ IXGBE_READ_REG(hw, IXGBE_EICR);
+
+ /* Disable the transmit unit. Each queue must be disabled. */
+- number_of_queues = hw->mac.num_tx_queues;
++ number_of_queues = hw->mac.max_tx_queues;
+ for (i = 0; i < number_of_queues; i++) {
+ reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
+ if (reg_val & IXGBE_TXDCTL_ENABLE) {
+@@ -282,15 +297,22 @@ s32 ixgbe_stop_adapter(struct ixgbe_hw *
+ }
+ }
+
++ /*
++	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
++ * access and verify no pending requests
++ */
++ if (ixgbe_disable_pcie_master(hw) != 0)
++ hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
++
+ return 0;
+ }
+
+ /**
+- * ixgbe_led_on - Turns on the software controllable LEDs.
++ * ixgbe_led_on_generic - Turns on the software controllable LEDs.
+ * @hw: pointer to hardware structure
+ * @index: led number to turn on
+ **/
+-s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index)
++s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
+ {
+ u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+
+@@ -304,11 +326,11 @@ s32 ixgbe_led_on(struct ixgbe_hw *hw, u3
+ }
+
+ /**
+- * ixgbe_led_off - Turns off the software controllable LEDs.
++ * ixgbe_led_off_generic - Turns off the software controllable LEDs.
+ * @hw: pointer to hardware structure
+ * @index: led number to turn off
+ **/
+-s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index)
++s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
+ {
+ u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+
+@@ -321,15 +343,14 @@ s32 ixgbe_led_off(struct ixgbe_hw *hw, u
+ return 0;
+ }
+
+-
+ /**
+- * ixgbe_init_eeprom - Initialize EEPROM params
++ * ixgbe_init_eeprom_params_generic - Initialize EEPROM params
+ * @hw: pointer to hardware structure
+ *
+ * Initializes the EEPROM parameters ixgbe_eeprom_info within the
+ * ixgbe_hw struct in order to set up EEPROM access.
+ **/
+-s32 ixgbe_init_eeprom(struct ixgbe_hw *hw)
++s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
+ {
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ u32 eec;
+@@ -337,6 +358,9 @@ s32 ixgbe_init_eeprom(struct ixgbe_hw *h
+
+ if (eeprom->type == ixgbe_eeprom_uninitialized) {
+ eeprom->type = ixgbe_eeprom_none;
++		/* Set default semaphore delay to 10ms, which is a
++		 * well-tested value */
++ eeprom->semaphore_delay = 10;
+
+ /*
+ * Check for EEPROM present first.
+@@ -369,18 +393,85 @@ s32 ixgbe_init_eeprom(struct ixgbe_hw *h
+ }
+
+ /**
+- * ixgbe_read_eeprom - Read EEPROM word using EERD
++ * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
++ * @hw: pointer to hardware structure
++ * @offset: offset within the EEPROM to be read
++ * @data: read 16 bit value from EEPROM
++ *
++ * Reads 16 bit value from EEPROM through bit-bang method
++ **/
++s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
++ u16 *data)
++{
++ s32 status;
++ u16 word_in;
++ u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
++
++ hw->eeprom.ops.init_params(hw);
++
++ if (offset >= hw->eeprom.word_size) {
++ status = IXGBE_ERR_EEPROM;
++ goto out;
++ }
++
++ /* Prepare the EEPROM for reading */
++ status = ixgbe_acquire_eeprom(hw);
++
++ if (status == 0) {
++ if (ixgbe_ready_eeprom(hw) != 0) {
++ ixgbe_release_eeprom(hw);
++ status = IXGBE_ERR_EEPROM;
++ }
++ }
++
++ if (status == 0) {
++ ixgbe_standby_eeprom(hw);
++
++ /*
++ * Some SPI eeproms use the 8th address bit embedded in the
++ * opcode
++ */
++ if ((hw->eeprom.address_bits == 8) && (offset >= 128))
++ read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
++
++ /* Send the READ command (opcode + addr) */
++ ixgbe_shift_out_eeprom_bits(hw, read_opcode,
++ IXGBE_EEPROM_OPCODE_BITS);
++ ixgbe_shift_out_eeprom_bits(hw, (u16)(offset*2),
++ hw->eeprom.address_bits);
++
++ /* Read the data. */
++ word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
++ *data = (word_in >> 8) | (word_in << 8);
++
++ /* End this read operation */
++ ixgbe_release_eeprom(hw);
++ }
++
++out:
++ return status;
++}
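++
++/*
++ * Example: on a part with 8 address bits, word offset 0x80 maps to byte
++ * address 0x100; the ninth address bit cannot be shifted out, so it is
++ * folded into the opcode as the A8 bit (the offset >= 128 case above).
++ */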
++
++/**
++ * ixgbe_read_eeprom_generic - Read EEPROM word using EERD
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM using the EERD register.
+ **/
+-s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data)
++s32 ixgbe_read_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
+ {
+ u32 eerd;
+ s32 status;
+
++ hw->eeprom.ops.init_params(hw);
++
++ if (offset >= hw->eeprom.word_size) {
++ status = IXGBE_ERR_EEPROM;
++ goto out;
++ }
++
+ eerd = (offset << IXGBE_EEPROM_READ_ADDR_SHIFT) +
+ IXGBE_EEPROM_READ_REG_START;
+
+@@ -389,10 +480,11 @@ s32 ixgbe_read_eeprom(struct ixgbe_hw *h
+
+ if (status == 0)
+ *data = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
+- IXGBE_EEPROM_READ_REG_DATA);
++ IXGBE_EEPROM_READ_REG_DATA);
+ else
+ hw_dbg(hw, "Eeprom read timed out\n");
+
++out:
+ return status;
+ }
+
+@@ -420,6 +512,58 @@ static s32 ixgbe_poll_eeprom_eerd_done(s
+ }
+
+ /**
++ * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
++ * @hw: pointer to hardware structure
++ *
++ * Prepares EEPROM for access using bit-bang method. This function should
++ * be called before issuing a command to the EEPROM.
++ **/
++static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
++{
++ s32 status = 0;
++ u32 eec;
++ u32 i;
++
++ if (ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
++ status = IXGBE_ERR_SWFW_SYNC;
++
++ if (status == 0) {
++ eec = IXGBE_READ_REG(hw, IXGBE_EEC);
++
++ /* Request EEPROM Access */
++ eec |= IXGBE_EEC_REQ;
++ IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
++
++ for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
++ eec = IXGBE_READ_REG(hw, IXGBE_EEC);
++ if (eec & IXGBE_EEC_GNT)
++ break;
++ udelay(5);
++ }
++
++ /* Release if grant not acquired */
++ if (!(eec & IXGBE_EEC_GNT)) {
++ eec &= ~IXGBE_EEC_REQ;
++ IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
++ hw_dbg(hw, "Could not acquire EEPROM grant\n");
++
++ ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
++ status = IXGBE_ERR_EEPROM;
++ }
++ }
++
++ /* Setup EEPROM for Read/Write */
++ if (status == 0) {
++ /* Clear CS and SK */
++ eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
++ IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
++ IXGBE_WRITE_FLUSH(hw);
++ udelay(1);
++ }
++ return status;
++}
++
++/**
+ * ixgbe_get_eeprom_semaphore - Get hardware semaphore
+ * @hw: pointer to hardware structure
+ *
+@@ -475,7 +619,7 @@ static s32 ixgbe_get_eeprom_semaphore(st
+ */
+ if (i >= timeout) {
+ hw_dbg(hw, "Driver can't access the Eeprom - Semaphore "
+- "not granted.\n");
++ "not granted.\n");
+ ixgbe_release_eeprom_semaphore(hw);
+ status = IXGBE_ERR_EEPROM;
+ }
+@@ -503,6 +647,217 @@ static void ixgbe_release_eeprom_semapho
+ }
+
+ /**
++ * ixgbe_ready_eeprom - Polls for EEPROM ready
++ * @hw: pointer to hardware structure
++ **/
++static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
++{
++ s32 status = 0;
++ u16 i;
++ u8 spi_stat_reg;
++
++ /*
++ * Read "Status Register" repeatedly until the LSB is cleared. The
++ * EEPROM will signal that the command has been completed by clearing
++ * bit 0 of the internal status register. If it's not cleared within
++ * 5 milliseconds, then error out.
++ */
++ for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
++ ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
++ IXGBE_EEPROM_OPCODE_BITS);
++ spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
++ if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
++ break;
++
++ udelay(5);
++ ixgbe_standby_eeprom(hw);
++	}
++
++ /*
++ * On some parts, SPI write time could vary from 0-20mSec on 3.3V
++ * devices (and only 0-5mSec on 5V devices)
++ */
++ if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
++ hw_dbg(hw, "SPI EEPROM Status error\n");
++ status = IXGBE_ERR_EEPROM;
++ }
++
++ return status;
++}
++
++/**
++ * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
++ * @hw: pointer to hardware structure
++ **/
++static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
++{
++ u32 eec;
++
++ eec = IXGBE_READ_REG(hw, IXGBE_EEC);
++
++ /* Toggle CS to flush commands */
++ eec |= IXGBE_EEC_CS;
++ IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
++ IXGBE_WRITE_FLUSH(hw);
++ udelay(1);
++ eec &= ~IXGBE_EEC_CS;
++ IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
++ IXGBE_WRITE_FLUSH(hw);
++ udelay(1);
++}
++
++/**
++ * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
++ * @hw: pointer to hardware structure
++ * @data: data to send to the EEPROM
++ * @count: number of bits to shift out
++ **/
++static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
++ u16 count)
++{
++ u32 eec;
++ u32 mask;
++ u32 i;
++
++ eec = IXGBE_READ_REG(hw, IXGBE_EEC);
++
++ /*
++ * Mask is used to shift "count" bits of "data" out to the EEPROM
++ * one bit at a time. Determine the starting bit based on count
++ */
++ mask = 0x01 << (count - 1);
++
++ for (i = 0; i < count; i++) {
++ /*
++ * A "1" is shifted out to the EEPROM by setting bit "DI" to a
++ * "1", and then raising and then lowering the clock (the SK
++ * bit controls the clock input to the EEPROM). A "0" is
++ * shifted out to the EEPROM by setting "DI" to "0" and then
++ * raising and then lowering the clock.
++ */
++ if (data & mask)
++ eec |= IXGBE_EEC_DI;
++ else
++ eec &= ~IXGBE_EEC_DI;
++
++ IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
++ IXGBE_WRITE_FLUSH(hw);
++
++ udelay(1);
++
++ ixgbe_raise_eeprom_clk(hw, &eec);
++ ixgbe_lower_eeprom_clk(hw, &eec);
++
++ /*
++ * Shift mask to signify next bit of data to shift in to the
++ * EEPROM
++ */
++ mask = mask >> 1;
++	}
++
++ /* We leave the "DI" bit set to "0" when we leave this routine. */
++ eec &= ~IXGBE_EEC_DI;
++ IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
++ IXGBE_WRITE_FLUSH(hw);
++}
++
++/**
++ * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
++ * @hw: pointer to hardware structure
++ * @count: number of bits to shift in
++ **/
++static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
++{
++ u32 eec;
++ u32 i;
++ u16 data = 0;
++
++ /*
++ * In order to read a register from the EEPROM, we need to shift
++ * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
++ * the clock input to the EEPROM (setting the SK bit), and then reading
++ * the value of the "DO" bit. During this "shifting in" process the
++ * "DI" bit should always be clear.
++ */
++ eec = IXGBE_READ_REG(hw, IXGBE_EEC);
++
++ eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
++
++ for (i = 0; i < count; i++) {
++ data = data << 1;
++ ixgbe_raise_eeprom_clk(hw, &eec);
++
++ eec = IXGBE_READ_REG(hw, IXGBE_EEC);
++
++ eec &= ~(IXGBE_EEC_DI);
++ if (eec & IXGBE_EEC_DO)
++ data |= 1;
++
++ ixgbe_lower_eeprom_clk(hw, &eec);
++ }
++
++ return data;
++}
++
++/**
++ * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
++ * @hw: pointer to hardware structure
++ * @eec: EEC register's current value
++ **/
++static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
++{
++ /*
++ * Raise the clock input to the EEPROM
++ * (setting the SK bit), then delay
++ */
++ *eec = *eec | IXGBE_EEC_SK;
++ IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
++ IXGBE_WRITE_FLUSH(hw);
++ udelay(1);
++}
++
++/**
++ * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
++ * @hw: pointer to hardware structure
++ * @eec: EEC register's current value
++ **/
++static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
++{
++ /*
++ * Lower the clock input to the EEPROM (clearing the SK bit), then
++ * delay
++ */
++ *eec = *eec & ~IXGBE_EEC_SK;
++ IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
++ IXGBE_WRITE_FLUSH(hw);
++ udelay(1);
++}
++
++/**
++ * ixgbe_release_eeprom - Release EEPROM, release semaphores
++ * @hw: pointer to hardware structure
++ **/
++static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
++{
++ u32 eec;
++
++ eec = IXGBE_READ_REG(hw, IXGBE_EEC);
++
++ eec |= IXGBE_EEC_CS; /* Pull CS high */
++ eec &= ~IXGBE_EEC_SK; /* Lower SCK */
++
++ IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
++ IXGBE_WRITE_FLUSH(hw);
++
++ udelay(1);
++
++ /* Stop requesting EEPROM access */
++ eec &= ~IXGBE_EEC_REQ;
++ IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
++
++ ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
++}
++
++/**
+ * ixgbe_calc_eeprom_checksum - Calculates and returns the checksum
+ * @hw: pointer to hardware structure
+ **/
+@@ -517,7 +872,7 @@ static u16 ixgbe_calc_eeprom_checksum(st
+
+ /* Include 0x0-0x3F in the checksum */
+ for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
+- if (ixgbe_read_eeprom(hw, i, &word) != 0) {
++ if (hw->eeprom.ops.read(hw, i, &word) != 0) {
+ hw_dbg(hw, "EEPROM read failed\n");
+ break;
+ }
+@@ -526,15 +881,15 @@ static u16 ixgbe_calc_eeprom_checksum(st
+
+ /* Include all data from pointers except for the fw pointer */
+ for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
+- ixgbe_read_eeprom(hw, i, &pointer);
++ hw->eeprom.ops.read(hw, i, &pointer);
+
+ /* Make sure the pointer seems valid */
+ if (pointer != 0xFFFF && pointer != 0) {
+- ixgbe_read_eeprom(hw, pointer, &length);
++ hw->eeprom.ops.read(hw, pointer, &length);
+
+ if (length != 0xFFFF && length != 0) {
+ for (j = pointer+1; j <= pointer+length; j++) {
+- ixgbe_read_eeprom(hw, j, &word);
++ hw->eeprom.ops.read(hw, j, &word);
+ checksum += word;
+ }
+ }
+@@ -547,14 +902,15 @@ static u16 ixgbe_calc_eeprom_checksum(st
+ }
+
+ /**
+- * ixgbe_validate_eeprom_checksum - Validate EEPROM checksum
++ * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum_val: calculated checksum
+ *
+ * Performs checksum calculation and validates the EEPROM checksum. If the
+ * caller does not need checksum_val, the value can be NULL.
+ **/
+-s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val)
++s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
++ u16 *checksum_val)
+ {
+ s32 status;
+ u16 checksum;
+@@ -565,12 +921,12 @@ s32 ixgbe_validate_eeprom_checksum(struc
+ * not continue or we could be in for a very long wait while every
+ * EEPROM read fails
+ */
+- status = ixgbe_read_eeprom(hw, 0, &checksum);
++ status = hw->eeprom.ops.read(hw, 0, &checksum);
+
+ if (status == 0) {
+ checksum = ixgbe_calc_eeprom_checksum(hw);
+
+- ixgbe_read_eeprom(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
++ hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
+
+ /*
+ * Verify read checksum from EEPROM is the same as
+@@ -590,6 +946,33 @@ s32 ixgbe_validate_eeprom_checksum(struc
+ }
+
+ /**
++ * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
++ * @hw: pointer to hardware structure
++ **/
++s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
++{
++ s32 status;
++ u16 checksum;
++
++ /*
++ * Read the first word from the EEPROM. If this times out or fails, do
++ * not continue or we could be in for a very long wait while every
++ * EEPROM read fails
++ */
++ status = hw->eeprom.ops.read(hw, 0, &checksum);
++
++ if (status == 0) {
++ checksum = ixgbe_calc_eeprom_checksum(hw);
++ status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
++ checksum);
++ } else {
++ hw_dbg(hw, "EEPROM read failed\n");
++ }
++
++ return status;
++}
++
++/**
+ * ixgbe_validate_mac_addr - Validate MAC address
+ * @mac_addr: pointer to MAC address.
+ *
+@@ -607,61 +990,140 @@ s32 ixgbe_validate_mac_addr(u8 *mac_addr
+ status = IXGBE_ERR_INVALID_MAC_ADDR;
+ /* Reject the zero address */
+ else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
+- mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0)
++ mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0)
+ status = IXGBE_ERR_INVALID_MAC_ADDR;
+
+ return status;
+ }
+
+ /**
+- * ixgbe_set_rar - Set RX address register
++ * ixgbe_set_rar_generic - Set Rx address register
+ * @hw: pointer to hardware structure
+- * @addr: Address to put into receive address register
+ * @index: Receive address register to write
+- * @vind: Vind to set RAR to
++ * @addr: Address to put into receive address register
++ * @vmdq: VMDq "set" or "pool" index
+ * @enable_addr: set flag that address is active
+ *
+ * Puts an ethernet address into a receive address register.
+ **/
+-s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vind,
+- u32 enable_addr)
++s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
++ u32 enable_addr)
+ {
+ u32 rar_low, rar_high;
++ u32 rar_entries = hw->mac.num_rar_entries;
+
+- /*
+- * HW expects these in little endian so we reverse the byte order from
+- * network order (big endian) to little endian
+- */
+- rar_low = ((u32)addr[0] |
+- ((u32)addr[1] << 8) |
+- ((u32)addr[2] << 16) |
+- ((u32)addr[3] << 24));
+-
+- rar_high = ((u32)addr[4] |
+- ((u32)addr[5] << 8) |
+- ((vind << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK));
++ /* setup VMDq pool selection before this RAR gets enabled */
++ hw->mac.ops.set_vmdq(hw, index, vmdq);
++
++ /* Make sure we are using a valid rar index range */
++ if (index < rar_entries) {
++ /*
++ * HW expects these in little endian so we reverse the byte
++ * order from network order (big endian) to little endian
++ */
++ rar_low = ((u32)addr[0] |
++ ((u32)addr[1] << 8) |
++ ((u32)addr[2] << 16) |
++ ((u32)addr[3] << 24));
++ /*
++ * Some parts put the VMDq setting in the extra RAH bits,
++ * so save everything except the lower 16 bits that hold part
++ * of the address and the address valid bit.
++ */
++ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
++ rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
++ rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
+
+- if (enable_addr != 0)
+- rar_high |= IXGBE_RAH_AV;
++ if (enable_addr != 0)
++ rar_high |= IXGBE_RAH_AV;
+
+- IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
+- IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
++ IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
++ IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
++ } else {
++ hw_dbg(hw, "RAR index %d is out of range.\n", index);
++ }
+
+ return 0;
+ }
+
+ /**
+- * ixgbe_init_rx_addrs - Initializes receive address filters.
++ * ixgbe_clear_rar_generic - Remove Rx address register
++ * @hw: pointer to hardware structure
++ * @index: Receive address register to write
++ *
++ * Clears an ethernet address from a receive address register.
++ **/
++s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
++{
++ u32 rar_high;
++ u32 rar_entries = hw->mac.num_rar_entries;
++
++ /* Make sure we are using a valid rar index range */
++ if (index < rar_entries) {
++ /*
++ * Some parts put the VMDq setting in the extra RAH bits,
++ * so save everything except the lower 16 bits that hold part
++ * of the address and the address valid bit.
++ */
++ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
++ rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
++
++ IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
++ IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
++ } else {
++ hw_dbg(hw, "RAR index %d is out of range.\n", index);
++ }
++
++ /* clear VMDq pool/queue selection for this RAR */
++ hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
++
++ return 0;
++}
++
++/**
++ * ixgbe_enable_rar - Enable Rx address register
++ * @hw: pointer to hardware structure
++ * @index: index into the RAR table
++ *
++ * Enables the select receive address register.
++ **/
++static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index)
++{
++ u32 rar_high;
++
++ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
++ rar_high |= IXGBE_RAH_AV;
++ IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
++}
++
++/**
++ * ixgbe_disable_rar - Disable Rx address register
++ * @hw: pointer to hardware structure
++ * @index: index into the RAR table
++ *
++ * Disables the select receive address register.
++ **/
++static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index)
++{
++ u32 rar_high;
++
++ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
++ rar_high &= (~IXGBE_RAH_AV);
++ IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
++}
++
++/**
++ * ixgbe_init_rx_addrs_generic - Initializes receive address filters.
+ * @hw: pointer to hardware structure
+ *
+ * Places the MAC address in receive address register 0 and clears the rest
+- * of the receive addresss registers. Clears the multicast table. Assumes
++ * of the receive address registers. Clears the multicast table. Assumes
+ * the receiver is in reset when the routine is called.
+ **/
+-static s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw)
++s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
+ {
+ u32 i;
+- u32 rar_entries = hw->mac.num_rx_addrs;
++ u32 rar_entries = hw->mac.num_rar_entries;
+
+ /*
+ * If the current mac address is valid, assume it is a software override
+@@ -671,29 +1133,30 @@ static s32 ixgbe_init_rx_addrs(struct ix
+ if (ixgbe_validate_mac_addr(hw->mac.addr) ==
+ IXGBE_ERR_INVALID_MAC_ADDR) {
+ /* Get the MAC address from the RAR0 for later reference */
+- ixgbe_get_mac_addr(hw, hw->mac.addr);
++ hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
+
+ hw_dbg(hw, " Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
+- hw->mac.addr[0], hw->mac.addr[1],
+- hw->mac.addr[2]);
++ hw->mac.addr[0], hw->mac.addr[1],
++ hw->mac.addr[2]);
+ hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3],
+- hw->mac.addr[4], hw->mac.addr[5]);
++ hw->mac.addr[4], hw->mac.addr[5]);
+ } else {
+ /* Setup the receive address. */
+ hw_dbg(hw, "Overriding MAC Address in RAR[0]\n");
+ hw_dbg(hw, " New MAC Addr =%.2X %.2X %.2X ",
+- hw->mac.addr[0], hw->mac.addr[1],
+- hw->mac.addr[2]);
++ hw->mac.addr[0], hw->mac.addr[1],
++ hw->mac.addr[2]);
+ hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3],
+- hw->mac.addr[4], hw->mac.addr[5]);
++ hw->mac.addr[4], hw->mac.addr[5]);
+
+- ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
++ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+ }
++ hw->addr_ctrl.overflow_promisc = 0;
+
+ hw->addr_ctrl.rar_used_count = 1;
+
+ /* Zero out the other receive addresses. */
+- hw_dbg(hw, "Clearing RAR[1-15]\n");
++ hw_dbg(hw, "Clearing RAR[1-%d]\n", rar_entries - 1);
+ for (i = 1; i < rar_entries; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
+@@ -705,9 +1168,113 @@ static s32 ixgbe_init_rx_addrs(struct ix
+ IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
+
+ hw_dbg(hw, " Clearing MTA\n");
+- for (i = 0; i < IXGBE_MC_TBL_SIZE; i++)
++ for (i = 0; i < hw->mac.mcft_size; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
+
++ if (hw->mac.ops.init_uta_tables)
++ hw->mac.ops.init_uta_tables(hw);
++
++ return 0;
++}
++
++/**
++ * ixgbe_add_uc_addr - Adds a secondary unicast address.
++ * @hw: pointer to hardware structure
++ * @addr: new address
++ * @vmdq: VMDq pool index to associate with this address
++ *
++ * Adds the address to an unused receive address register or, if the
++ * table is full, counts the overflow so promiscuous mode can be enabled.
++ **/
++static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
++{
++ u32 rar_entries = hw->mac.num_rar_entries;
++ u32 rar;
++
++ hw_dbg(hw, " UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
++ addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
++
++ /*
++ * Place this address in the RAR if there is room,
++ * else put the controller into promiscuous mode
++ */
++ if (hw->addr_ctrl.rar_used_count < rar_entries) {
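++ /*
++ * rar_used_count covers both unicast and multicast entries;
++ * multicast entries fill the RAR table from the top down (see
++ * ixgbe_add_mc_addr), so the next free unicast slot is the
++ * number of unicast entries already in use.
++ */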
++ rar = hw->addr_ctrl.rar_used_count -
++ hw->addr_ctrl.mc_addr_in_rar_count;
++ hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
++ hw_dbg(hw, "Added a secondary address to RAR[%d]\n", rar);
++ hw->addr_ctrl.rar_used_count++;
++ } else {
++ hw->addr_ctrl.overflow_promisc++;
++ }
++
++ hw_dbg(hw, "ixgbe_add_uc_addr Complete\n");
++}
++
++/**
++ * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
++ * @hw: pointer to hardware structure
++ * @addr_list: the list of new addresses
++ * @addr_count: number of addresses
++ * @next: iterator function to walk the address list
++ *
++ * The given list replaces any existing list. Clears the secondary addrs from
++ * receive address registers. Uses unused receive address registers for the
++ * first secondary addresses, and falls back to promiscuous mode as needed.
++ *
++ * Drivers using secondary unicast addresses must set user_set_promisc when
++ * manually putting the device into promiscuous mode.
++ **/
++s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
++ u32 addr_count, ixgbe_mc_addr_itr next)
++{
++ u8 *addr;
++ u32 i;
++ u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
++ u32 uc_addr_in_use;
++ u32 fctrl;
++ u32 vmdq;
++
++ /*
++ * Clear accounting of old secondary address list,
++ * don't count RAR[0]
++ */
++ uc_addr_in_use = hw->addr_ctrl.rar_used_count -
++ hw->addr_ctrl.mc_addr_in_rar_count - 1;
++ hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
++ hw->addr_ctrl.overflow_promisc = 0;
++
++ /* Zero out the other receive addresses */
++ hw_dbg(hw, "Clearing RAR[1-%d]\n", uc_addr_in_use);
++ for (i = 1; i <= uc_addr_in_use; i++) {
++ IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
++ IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
++ }
++
++ /* Add the new addresses */
++ for (i = 0; i < addr_count; i++) {
++ hw_dbg(hw, " Adding the secondary addresses:\n");
++ addr = next(hw, &addr_list, &vmdq);
++ ixgbe_add_uc_addr(hw, addr, vmdq);
++ }
++
++ if (hw->addr_ctrl.overflow_promisc) {
++ /* enable promisc if not already in overflow or set by user */
++ if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
++ hw_dbg(hw, " Entering address overflow promisc mode\n");
++ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
++ fctrl |= IXGBE_FCTRL_UPE;
++ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
++ }
++ } else {
++ /* only disable if set by overflow, not by user */
++ if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
++ hw_dbg(hw, " Leaving address overflow promisc mode\n");
++ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
++ fctrl &= ~IXGBE_FCTRL_UPE;
++ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
++ }
++ }
++
++ hw_dbg(hw, "ixgbe_update_uc_addr_list_generic Complete\n");
+ return 0;
+ }
+
+@@ -720,7 +1287,7 @@ static s32 ixgbe_init_rx_addrs(struct ix
+ * bit-vector to set in the multicast table. The hardware uses 12 bits, from
+ * incoming rx multicast addresses, to determine the bit-vector to check in
+ * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
+- * by the MO field of the MCSTCTRL. The MO field is set during initalization
++ * by the MO field of the MCSTCTRL. The MO field is set during initialization
+ * to mc_filter_type.
+ **/
+ static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
+@@ -728,19 +1295,19 @@ static s32 ixgbe_mta_vector(struct ixgbe
+ u32 vector = 0;
+
+ switch (hw->mac.mc_filter_type) {
+- case 0: /* use bits [47:36] of the address */
++ case 0: /* use bits [47:36] of the address */
+ vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
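++ /*
++ * Illustrative example: for 01:00:5E:00:00:16 this yields
++ * (0x00 >> 4) | (0x16 << 4) = 0x160.
++ */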
+ break;
+- case 1: /* use bits [46:35] of the address */
++ case 1: /* use bits [46:35] of the address */
+ vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
+ break;
+- case 2: /* use bits [45:34] of the address */
++ case 2: /* use bits [45:34] of the address */
+ vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
+ break;
+- case 3: /* use bits [43:32] of the address */
++ case 3: /* use bits [43:32] of the address */
+ vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
+ break;
+- default: /* Invalid mc_filter_type */
++ default: /* Invalid mc_filter_type */
+ hw_dbg(hw, "MC filter type param set incorrectly\n");
+ break;
+ }
+@@ -794,21 +1361,22 @@ static void ixgbe_set_mta(struct ixgbe_h
+ **/
+ static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr)
+ {
+- u32 rar_entries = hw->mac.num_rx_addrs;
++ u32 rar_entries = hw->mac.num_rar_entries;
++ u32 rar;
+
+ hw_dbg(hw, " MC Addr =%.2X %.2X %.2X %.2X %.2X %.2X\n",
+- mc_addr[0], mc_addr[1], mc_addr[2],
+- mc_addr[3], mc_addr[4], mc_addr[5]);
++ mc_addr[0], mc_addr[1], mc_addr[2],
++ mc_addr[3], mc_addr[4], mc_addr[5]);
+
+ /*
+ * Place this multicast address in the RAR if there is room,
+ * else put it in the MTA
+ */
+ if (hw->addr_ctrl.rar_used_count < rar_entries) {
+- ixgbe_set_rar(hw, hw->addr_ctrl.rar_used_count,
+- mc_addr, 0, IXGBE_RAH_AV);
+- hw_dbg(hw, "Added a multicast address to RAR[%d]\n",
+- hw->addr_ctrl.rar_used_count);
++ /* use RAR from the end up for multicast */
++ rar = rar_entries - hw->addr_ctrl.mc_addr_in_rar_count - 1;
++ hw->mac.ops.set_rar(hw, rar, mc_addr, 0, IXGBE_RAH_AV);
++ hw_dbg(hw, "Added a multicast address to RAR[%d]\n", rar);
+ hw->addr_ctrl.rar_used_count++;
+ hw->addr_ctrl.mc_addr_in_rar_count++;
+ } else {
+@@ -819,22 +1387,23 @@ static void ixgbe_add_mc_addr(struct ixg
+ }
+
+ /**
+- * ixgbe_update_mc_addr_list - Updates MAC list of multicast addresses
++ * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
+ * @hw: pointer to hardware structure
+ * @mc_addr_list: the list of new multicast addresses
+ * @mc_addr_count: number of addresses
+- * @pad: number of bytes between addresses in the list
++ * @next: iterator function to walk the multicast address list
+ *
+ * The given list replaces any existing list. Clears the MC addrs from receive
+- * address registers and the multicast table. Uses unsed receive address
++ * address registers and the multicast table. Uses unused receive address
+ * registers for the first multicast addresses, and hashes the rest into the
+ * multicast table.
+ **/
+-s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list,
+- u32 mc_addr_count, u32 pad)
++s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
++ u32 mc_addr_count, ixgbe_mc_addr_itr next)
+ {
+ u32 i;
+- u32 rar_entries = hw->mac.num_rx_addrs;
++ u32 rar_entries = hw->mac.num_rar_entries;
++ u32 vmdq;
+
+ /*
+ * Set the new number of MC addresses that we are being requested to
+@@ -846,7 +1415,8 @@ s32 ixgbe_update_mc_addr_list(struct ixg
+ hw->addr_ctrl.mta_in_use = 0;
+
+ /* Zero out the other receive addresses. */
+- hw_dbg(hw, "Clearing RAR[1-15]\n");
++ hw_dbg(hw, "Clearing RAR[%d-%d]\n", hw->addr_ctrl.rar_used_count,
++ rar_entries - 1);
+ for (i = hw->addr_ctrl.rar_used_count; i < rar_entries; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
+@@ -854,186 +1424,67 @@ s32 ixgbe_update_mc_addr_list(struct ixg
+
+ /* Clear the MTA */
+ hw_dbg(hw, " Clearing MTA\n");
+- for (i = 0; i < IXGBE_MC_TBL_SIZE; i++)
++ for (i = 0; i < hw->mac.mcft_size; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
+
+ /* Add the new addresses */
+ for (i = 0; i < mc_addr_count; i++) {
+ hw_dbg(hw, " Adding the multicast addresses:\n");
+- ixgbe_add_mc_addr(hw, mc_addr_list +
+- (i * (IXGBE_ETH_LENGTH_OF_ADDRESS + pad)));
++ ixgbe_add_mc_addr(hw, next(hw, &mc_addr_list, &vmdq));
+ }
+
+ /* Enable mta */
+ if (hw->addr_ctrl.mta_in_use > 0)
+ IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
+- IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
++ IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
+
+- hw_dbg(hw, "ixgbe_update_mc_addr_list Complete\n");
++ hw_dbg(hw, "ixgbe_update_mc_addr_list_generic Complete\n");
+ return 0;
+ }
+
+ /**
+- * ixgbe_clear_vfta - Clear VLAN filter table
++ * ixgbe_enable_mc_generic - Enable multicast address in RAR
+ * @hw: pointer to hardware structure
+ *
+- * Clears the VLAN filer table, and the VMDq index associated with the filter
++ * Enables multicast address in RAR and the use of the multicast hash table.
+ **/
+-static s32 ixgbe_clear_vfta(struct ixgbe_hw *hw)
++s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
+ {
+- u32 offset;
+- u32 vlanbyte;
+-
+- for (offset = 0; offset < IXGBE_VLAN_FILTER_TBL_SIZE; offset++)
+- IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
+-
+- for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
+- for (offset = 0; offset < IXGBE_VLAN_FILTER_TBL_SIZE; offset++)
+- IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
+- 0);
+-
+- return 0;
+-}
++ u32 i;
++ u32 rar_entries = hw->mac.num_rar_entries;
++ struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
+
+-/**
+- * ixgbe_set_vfta - Set VLAN filter table
+- * @hw: pointer to hardware structure
+- * @vlan: VLAN id to write to VLAN filter
+- * @vind: VMDq output index that maps queue to VLAN id in VFTA
+- * @vlan_on: boolean flag to turn on/off VLAN in VFTA
+- *
+- * Turn on/off specified VLAN in the VLAN filter table.
+- **/
+-s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+- bool vlan_on)
+-{
+- u32 VftaIndex;
+- u32 BitOffset;
+- u32 VftaReg;
+- u32 VftaByte;
+-
+- /* Determine 32-bit word position in array */
+- VftaIndex = (vlan >> 5) & 0x7F; /* upper seven bits */
+-
+- /* Determine the location of the (VMD) queue index */
+- VftaByte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
+- BitOffset = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */
+-
+- /* Set the nibble for VMD queue index */
+- VftaReg = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(VftaByte, VftaIndex));
+- VftaReg &= (~(0x0F << BitOffset));
+- VftaReg |= (vind << BitOffset);
+- IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(VftaByte, VftaIndex), VftaReg);
+-
+- /* Determine the location of the bit for this VLAN id */
+- BitOffset = vlan & 0x1F; /* lower five bits */
+-
+- VftaReg = IXGBE_READ_REG(hw, IXGBE_VFTA(VftaIndex));
+- if (vlan_on)
+- /* Turn on this VLAN id */
+- VftaReg |= (1 << BitOffset);
+- else
+- /* Turn off this VLAN id */
+- VftaReg &= ~(1 << BitOffset);
+- IXGBE_WRITE_REG(hw, IXGBE_VFTA(VftaIndex), VftaReg);
++ if (a->mc_addr_in_rar_count > 0)
++ for (i = (rar_entries - a->mc_addr_in_rar_count);
++ i < rar_entries; i++)
++ ixgbe_enable_rar(hw, i);
++
++ if (a->mta_in_use > 0)
++ IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
++ hw->mac.mc_filter_type);
+
+ return 0;
+ }
+
+ /**
+- * ixgbe_setup_fc - Configure flow control settings
++ * ixgbe_disable_mc_generic - Disable multicast address in RAR
+ * @hw: pointer to hardware structure
+- * @packetbuf_num: packet buffer number (0-7)
+ *
+- * Configures the flow control settings based on SW configuration.
+- * This function is used for 802.3x flow control configuration only.
++ * Disables multicast address in RAR and the use of the multicast hash table.
+ **/
+-s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
++s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
+ {
+- u32 frctl_reg;
+- u32 rmcs_reg;
+-
+- if (packetbuf_num < 0 || packetbuf_num > 7)
+- hw_dbg(hw, "Invalid packet buffer number [%d], expected range "
+- "is 0-7\n", packetbuf_num);
+-
+- frctl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+- frctl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
+-
+- rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
+- rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);
+-
+- /*
+- * We want to save off the original Flow Control configuration just in
+- * case we get disconnected and then reconnected into a different hub
+- * or switch with different Flow Control capabilities.
+- */
+- hw->fc.type = hw->fc.original_type;
+-
+- /*
+- * The possible values of the "flow_control" parameter are:
+- * 0: Flow control is completely disabled
+- * 1: Rx flow control is enabled (we can receive pause frames but not
+- * send pause frames).
+- * 2: Tx flow control is enabled (we can send pause frames but we do not
+- * support receiving pause frames)
+- * 3: Both Rx and TX flow control (symmetric) are enabled.
+- * other: Invalid.
+- */
+- switch (hw->fc.type) {
+- case ixgbe_fc_none:
+- break;
+- case ixgbe_fc_rx_pause:
+- /*
+- * RX Flow control is enabled,
+- * and TX Flow control is disabled.
+- */
+- frctl_reg |= IXGBE_FCTRL_RFCE;
+- break;
+- case ixgbe_fc_tx_pause:
+- /*
+- * TX Flow control is enabled, and RX Flow control is disabled,
+- * by a software over-ride.
+- */
+- rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
+- break;
+- case ixgbe_fc_full:
+- /*
+- * Flow control (both RX and TX) is enabled by a software
+- * over-ride.
+- */
+- frctl_reg |= IXGBE_FCTRL_RFCE;
+- rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
+- break;
+- default:
+- /* We should never get here. The value should be 0-3. */
+- hw_dbg(hw, "Flow control param set incorrectly\n");
+- break;
+- }
+-
+- /* Enable 802.3x based flow control settings. */
+- IXGBE_WRITE_REG(hw, IXGBE_FCTRL, frctl_reg);
+- IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
++ u32 i;
++ u32 rar_entries = hw->mac.num_rar_entries;
++ struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
+
+- /*
+- * We need to set up the Receive Threshold high and low water
+- * marks as well as (optionally) enabling the transmission of
+- * XON frames.
+- */
+- if (hw->fc.type & ixgbe_fc_tx_pause) {
+- if (hw->fc.send_xon) {
+- IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
+- (hw->fc.low_water | IXGBE_FCRTL_XONE));
+- } else {
+- IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
+- hw->fc.low_water);
+- }
+- IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num),
+- (hw->fc.high_water)|IXGBE_FCRTH_FCEN);
+- }
++ if (a->mc_addr_in_rar_count > 0)
++ for (i = (rar_entries - a->mc_addr_in_rar_count);
++ i < rar_entries; i++)
++ ixgbe_disable_rar(hw, i);
+
+- IXGBE_WRITE_REG(hw, IXGBE_FCTTV(0), hw->fc.pause_time);
+- IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));
++ if (a->mta_in_use > 0)
++ IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
+
+ return 0;
+ }
+@@ -1049,13 +1500,24 @@ s32 ixgbe_setup_fc(struct ixgbe_hw *hw,
+ **/
+ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
+ {
+- u32 ctrl;
+- s32 i;
++ u32 i;
++ u32 reg_val;
++ u32 number_of_queues;
+ s32 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
+
+- ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+- ctrl |= IXGBE_CTRL_GIO_DIS;
+- IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
++ /* Disable the receive unit by stopping each queue */
++ number_of_queues = hw->mac.max_rx_queues;
++ for (i = 0; i < number_of_queues; i++) {
++ reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
++ if (reg_val & IXGBE_RXDCTL_ENABLE) {
++ reg_val &= ~IXGBE_RXDCTL_ENABLE;
++ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
++ }
++ }
++
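++ /*
++ * With the Rx queues quiesced, request PCIe master disable; the
++ * loop below polls the GIO status bit until requests drain.
++ */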
++ reg_val = IXGBE_READ_REG(hw, IXGBE_CTRL);
++ reg_val |= IXGBE_CTRL_GIO_DIS;
++ IXGBE_WRITE_REG(hw, IXGBE_CTRL, reg_val);
+
+ for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
+ if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) {
+@@ -1070,11 +1532,11 @@ s32 ixgbe_disable_pcie_master(struct ixg
+
+
+ /**
+- * ixgbe_acquire_swfw_sync - Aquire SWFW semaphore
++ * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
+ * @hw: pointer to hardware structure
+- * @mask: Mask to specify wich semaphore to acquire
++ * @mask: Mask to specify which semaphore to acquire
+ *
+- * Aquires the SWFW semaphore throught the GSSR register for the specified
++ * Acquires the SWFW semaphore through the GSSR register for the specified
+ * function (CSR, PHY0, PHY1, EEPROM, Flash)
+ **/
+ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
+@@ -1116,9 +1578,9 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe
+ /**
+ * ixgbe_release_swfw_sync - Release SWFW semaphore
+ * @hw: pointer to hardware structure
+- * @mask: Mask to specify wich semaphore to release
++ * @mask: Mask to specify which semaphore to release
+ *
+- * Releases the SWFW semaphore throught the GSSR register for the specified
++ * Releases the SWFW semaphore through the GSSR register for the specified
+ * function (CSR, PHY0, PHY1, EEPROM, Flash)
+ **/
+ void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
+@@ -1135,45 +1597,3 @@ void ixgbe_release_swfw_sync(struct ixgb
+ ixgbe_release_eeprom_semaphore(hw);
+ }
+
+-/**
+- * ixgbe_read_analog_reg8 - Reads 8 bit Atlas analog register
+- * @hw: pointer to hardware structure
+- * @reg: analog register to read
+- * @val: read value
+- *
+- * Performs write operation to analog register specified.
+- **/
+-s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val)
+-{
+- u32 atlas_ctl;
+-
+- IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
+- IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
+- IXGBE_WRITE_FLUSH(hw);
+- udelay(10);
+- atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
+- *val = (u8)atlas_ctl;
+-
+- return 0;
+-}
+-
+-/**
+- * ixgbe_write_analog_reg8 - Writes 8 bit Atlas analog register
+- * @hw: pointer to hardware structure
+- * @reg: atlas register to write
+- * @val: value to write
+- *
+- * Performs write operation to Atlas analog register specified.
+- **/
+-s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val)
+-{
+- u32 atlas_ctl;
+-
+- atlas_ctl = (reg << 8) | val;
+- IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
+- IXGBE_WRITE_FLUSH(hw);
+- udelay(10);
+-
+- return 0;
+-}
+-
+--- a/drivers/net/ixgbe/ixgbe_common.h
++++ b/drivers/net/ixgbe/ixgbe_common.h
+@@ -1,7 +1,7 @@
+ /*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+- Copyright(c) 1999 - 2007 Intel Corporation.
++ Copyright(c) 1999 - 2008 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+@@ -20,7 +20,6 @@
+ the file called "COPYING".
+
+ Contact Information:
+- Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+@@ -31,34 +30,45 @@
+
+ #include "ixgbe_type.h"
+
+-s32 ixgbe_init_hw(struct ixgbe_hw *hw);
+-s32 ixgbe_start_hw(struct ixgbe_hw *hw);
+-s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr);
+-s32 ixgbe_stop_adapter(struct ixgbe_hw *hw);
+-s32 ixgbe_read_part_num(struct ixgbe_hw *hw, u32 *part_num);
+-
+-s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index);
+-s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index);
+-
+-s32 ixgbe_init_eeprom(struct ixgbe_hw *hw);
+-s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data);
+-s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val);
+-
+-s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vind,
+- u32 enable_addr);
+-s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list,
+- u32 mc_addr_count, u32 pad);
+-s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on);
+-s32 ixgbe_validate_mac_addr(u8 *mac_addr);
+-
+-s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packtetbuf_num);
++s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
++s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
++s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
++s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
++s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num);
++s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
++s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
++s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw);
++
++s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index);
++s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index);
++
++s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw);
++s32 ixgbe_read_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
++s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
++ u16 *data);
++s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
++ u16 *checksum_val);
++s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
++
++s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
++ u32 enable_addr);
++s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
++s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
++s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
++ u32 mc_addr_count,
++ ixgbe_mc_addr_itr func);
++s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
++ u32 addr_count, ixgbe_mc_addr_itr func);
++s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
++s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
+
++s32 ixgbe_validate_mac_addr(u8 *mac_addr);
+ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
+ void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask);
+ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
+
+-s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val);
+-s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val);
++s32 ixgbe_read_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 *val);
++s32 ixgbe_write_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 val);
+
+ #define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
+
+--- a/drivers/net/ixgbe/ixgbe_ethtool.c
++++ b/drivers/net/ixgbe/ixgbe_ethtool.c
+@@ -1,7 +1,7 @@
+ /*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+- Copyright(c) 1999 - 2007 Intel Corporation.
++ Copyright(c) 1999 - 2008 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+@@ -20,7 +20,6 @@
+ the file called "COPYING".
+
+ Contact Information:
+- Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+@@ -48,7 +47,7 @@ struct ixgbe_stats {
+ };
+
+ #define IXGBE_STAT(m) sizeof(((struct ixgbe_adapter *)0)->m), \
+- offsetof(struct ixgbe_adapter, m)
++ offsetof(struct ixgbe_adapter, m)
+ static struct ixgbe_stats ixgbe_gstrings_stats[] = {
+ {"rx_packets", IXGBE_STAT(net_stats.rx_packets)},
+ {"tx_packets", IXGBE_STAT(net_stats.tx_packets)},
+@@ -90,19 +89,22 @@ static struct ixgbe_stats ixgbe_gstrings
+ {"rx_header_split", IXGBE_STAT(rx_hdr_split)},
+ {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
+ {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
++#ifdef CONFIG_IXGBE_LRO
+ {"lro_aggregated", IXGBE_STAT(lro_aggregated)},
+ {"lro_flushed", IXGBE_STAT(lro_flushed)},
++#endif
+ };
+
+ #define IXGBE_QUEUE_STATS_LEN \
+- ((((struct ixgbe_adapter *)netdev->priv)->num_tx_queues + \
+- ((struct ixgbe_adapter *)netdev->priv)->num_rx_queues) * \
+- (sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
+-#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
++ ((((struct ixgbe_adapter *)netdev->priv)->num_tx_queues + \
++ ((struct ixgbe_adapter *)netdev->priv)->num_rx_queues) * \
++ (sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
++#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
+ #define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
+
+ static int ixgbe_get_settings(struct net_device *netdev,
+- struct ethtool_cmd *ecmd)
++ struct ethtool_cmd *ecmd)
+ {
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+@@ -114,7 +116,7 @@ static int ixgbe_get_settings(struct net
+ ecmd->transceiver = XCVR_EXTERNAL;
+ if (hw->phy.media_type == ixgbe_media_type_copper) {
+ ecmd->supported |= (SUPPORTED_1000baseT_Full |
+- SUPPORTED_TP | SUPPORTED_Autoneg);
++ SUPPORTED_TP | SUPPORTED_Autoneg);
+
+ ecmd->advertising = (ADVERTISED_TP | ADVERTISED_Autoneg);
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
+@@ -126,14 +128,15 @@ static int ixgbe_get_settings(struct net
+ } else {
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising = (ADVERTISED_10000baseT_Full |
+- ADVERTISED_FIBRE);
++ ADVERTISED_FIBRE);
+ ecmd->port = PORT_FIBRE;
++ ecmd->autoneg = AUTONEG_DISABLE;
+ }
+
+- adapter->hw.mac.ops.check_link(hw, &(link_speed), &link_up);
++ hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+ if (link_up) {
+ ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
+- SPEED_10000 : SPEED_1000;
++ SPEED_10000 : SPEED_1000;
+ ecmd->duplex = DUPLEX_FULL;
+ } else {
+ ecmd->speed = -1;
+@@ -144,7 +147,7 @@ static int ixgbe_get_settings(struct net
+ }
+
+ static int ixgbe_set_settings(struct net_device *netdev,
+- struct ethtool_cmd *ecmd)
++ struct ethtool_cmd *ecmd)
+ {
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+@@ -164,7 +167,7 @@ static int ixgbe_set_settings(struct net
+ }
+
+ static void ixgbe_get_pauseparam(struct net_device *netdev,
+- struct ethtool_pauseparam *pause)
++ struct ethtool_pauseparam *pause)
+ {
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+@@ -182,7 +185,7 @@ static void ixgbe_get_pauseparam(struct
+ }
+
+ static int ixgbe_set_pauseparam(struct net_device *netdev,
+- struct ethtool_pauseparam *pause)
++ struct ethtool_pauseparam *pause)
+ {
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+@@ -233,15 +236,15 @@ static int ixgbe_set_rx_csum(struct net_
+
+ static u32 ixgbe_get_tx_csum(struct net_device *netdev)
+ {
+- return (netdev->features & NETIF_F_HW_CSUM) != 0;
++ return (netdev->features & NETIF_F_IP_CSUM) != 0;
+ }
+
+ static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data)
+ {
+ if (data)
+- netdev->features |= NETIF_F_HW_CSUM;
++ netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+ else
+- netdev->features &= ~NETIF_F_HW_CSUM;
++ netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+
+ return 0;
+ }
+@@ -281,7 +284,7 @@ static int ixgbe_get_regs_len(struct net
+ #define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_
+
+ static void ixgbe_get_regs(struct net_device *netdev,
+- struct ethtool_regs *regs, void *p)
++ struct ethtool_regs *regs, void *p)
+ {
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+@@ -315,7 +318,9 @@ static void ixgbe_get_regs(struct net_de
+ regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC);
+
+ /* Interrupt */
+- regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICR);
++ /* don't read EICR because it can clear interrupt causes, instead
++ * read EICS which is a shadow but doesn't clear EICR */
++ regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS);
+ regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS);
+ regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS);
+ regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC);
+@@ -325,7 +330,7 @@ static void ixgbe_get_regs(struct net_de
+ regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
+ regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
+ regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
+- regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL);
++ regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0));
+ regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);
+
+ /* Flow Control */
+@@ -371,7 +376,7 @@ static void ixgbe_get_regs(struct net_de
+ regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i));
+ for (i = 0; i < 16; i++)
+ regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i));
+- regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE);
++ regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0));
+ regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL);
+@@ -419,7 +424,6 @@ static void ixgbe_get_regs(struct net_de
+ regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
+ regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT);
+
+- /* DCE */
+ regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);
+ regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
+ regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
+@@ -539,21 +543,17 @@ static void ixgbe_get_regs(struct net_de
+ /* Diagnostic */
+ regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL);
+ for (i = 0; i < 8; i++)
+- regs_buff[1072] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
++ regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
+ regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN);
+- regs_buff[1081] = IXGBE_READ_REG(hw, IXGBE_RIC_DW0);
+- regs_buff[1082] = IXGBE_READ_REG(hw, IXGBE_RIC_DW1);
+- regs_buff[1083] = IXGBE_READ_REG(hw, IXGBE_RIC_DW2);
+- regs_buff[1084] = IXGBE_READ_REG(hw, IXGBE_RIC_DW3);
++ for (i = 0; i < 4; i++)
++ regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i));
+ regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE);
+ regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL);
+ for (i = 0; i < 8; i++)
+- regs_buff[1087] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
++ regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
+ regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN);
+- regs_buff[1096] = IXGBE_READ_REG(hw, IXGBE_TIC_DW0);
+- regs_buff[1097] = IXGBE_READ_REG(hw, IXGBE_TIC_DW1);
+- regs_buff[1098] = IXGBE_READ_REG(hw, IXGBE_TIC_DW2);
+- regs_buff[1099] = IXGBE_READ_REG(hw, IXGBE_TIC_DW3);
++ for (i = 0; i < 4; i++)
++ regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
+ regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
+ regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
+ regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0);
+@@ -566,7 +566,7 @@ static void ixgbe_get_regs(struct net_de
+ regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2);
+ regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3);
+ for (i = 0; i < 8; i++)
+- regs_buff[1111] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
++ regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
+ regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
+ regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1);
+ regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2);
+@@ -585,7 +585,7 @@ static int ixgbe_get_eeprom_len(struct n
+ }
+
+ static int ixgbe_get_eeprom(struct net_device *netdev,
+- struct ethtool_eeprom *eeprom, u8 *bytes)
++ struct ethtool_eeprom *eeprom, u8 *bytes)
+ {
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+@@ -608,8 +608,8 @@ static int ixgbe_get_eeprom(struct net_d
+ return -ENOMEM;
+
+ for (i = 0; i < eeprom_len; i++) {
+- if ((ret_val = ixgbe_read_eeprom(hw, first_word + i,
+- &eeprom_buff[i])))
++ if ((ret_val = hw->eeprom.ops.read(hw, first_word + i,
++ &eeprom_buff[i])))
+ break;
+ }
+
+@@ -624,7 +624,7 @@ static int ixgbe_get_eeprom(struct net_d
+ }
+
+ static void ixgbe_get_drvinfo(struct net_device *netdev,
+- struct ethtool_drvinfo *drvinfo)
++ struct ethtool_drvinfo *drvinfo)
+ {
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+@@ -637,7 +637,7 @@ static void ixgbe_get_drvinfo(struct net
+ }
+
+ static void ixgbe_get_ringparam(struct net_device *netdev,
+- struct ethtool_ringparam *ring)
++ struct ethtool_ringparam *ring)
+ {
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_ring *tx_ring = adapter->tx_ring;
+@@ -654,15 +654,12 @@ static void ixgbe_get_ringparam(struct n
+ }
+
+ static int ixgbe_set_ringparam(struct net_device *netdev,
+- struct ethtool_ringparam *ring)
++ struct ethtool_ringparam *ring)
+ {
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+- struct ixgbe_tx_buffer *old_buf;
+- struct ixgbe_rx_buffer *old_rx_buf;
+- void *old_desc;
++ struct ixgbe_ring *temp_ring;
+ int i, err;
+- u32 new_rx_count, new_tx_count, old_size;
+- dma_addr_t old_dma;
++ u32 new_rx_count, new_tx_count;
+
+ if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+ return -EINVAL;
+@@ -681,6 +678,15 @@ static int ixgbe_set_ringparam(struct ne
+ return 0;
+ }
+
++ if (adapter->num_tx_queues > adapter->num_rx_queues)
++ temp_ring = vmalloc(adapter->num_tx_queues *
++ sizeof(struct ixgbe_ring));
++ else
++ temp_ring = vmalloc(adapter->num_rx_queues *
++ sizeof(struct ixgbe_ring));
++ if (!temp_ring)
++ return -ENOMEM;
++
+ while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
+ msleep(1);
+
+@@ -693,66 +699,61 @@ static int ixgbe_set_ringparam(struct ne
+ * to the tx and rx ring structs.
+ */
+ if (new_tx_count != adapter->tx_ring->count) {
++ memcpy(temp_ring, adapter->tx_ring,
++ adapter->num_tx_queues * sizeof(struct ixgbe_ring));
++
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+- /* Save existing descriptor ring */
+- old_buf = adapter->tx_ring[i].tx_buffer_info;
+- old_desc = adapter->tx_ring[i].desc;
+- old_size = adapter->tx_ring[i].size;
+- old_dma = adapter->tx_ring[i].dma;
+- /* Try to allocate a new one */
+- adapter->tx_ring[i].tx_buffer_info = NULL;
+- adapter->tx_ring[i].desc = NULL;
+- adapter->tx_ring[i].count = new_tx_count;
+- err = ixgbe_setup_tx_resources(adapter,
+- &adapter->tx_ring[i]);
++ temp_ring[i].count = new_tx_count;
++ err = ixgbe_setup_tx_resources(adapter, &temp_ring[i]);
+ if (err) {
+- /* Restore the old one so at least
+- the adapter still works, even if
+- we failed the request */
+- adapter->tx_ring[i].tx_buffer_info = old_buf;
+- adapter->tx_ring[i].desc = old_desc;
+- adapter->tx_ring[i].size = old_size;
+- adapter->tx_ring[i].dma = old_dma;
++ while (i) {
++ i--;
++ ixgbe_free_tx_resources(adapter,
++ &temp_ring[i]);
++ }
+ goto err_setup;
+ }
+- /* Free the old buffer manually */
+- vfree(old_buf);
+- pci_free_consistent(adapter->pdev, old_size,
+- old_desc, old_dma);
+ }
++
++ for (i = 0; i < adapter->num_tx_queues; i++)
++ ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]);
++
++ memcpy(adapter->tx_ring, temp_ring,
++ adapter->num_tx_queues * sizeof(struct ixgbe_ring));
++
++ adapter->tx_ring_count = new_tx_count;
+ }
+
+ if (new_rx_count != adapter->rx_ring->count) {
+- for (i = 0; i < adapter->num_rx_queues; i++) {
++ memcpy(temp_ring, adapter->rx_ring,
++ adapter->num_rx_queues * sizeof(struct ixgbe_ring));
+
+- old_rx_buf = adapter->rx_ring[i].rx_buffer_info;
+- old_desc = adapter->rx_ring[i].desc;
+- old_size = adapter->rx_ring[i].size;
+- old_dma = adapter->rx_ring[i].dma;
+-
+- adapter->rx_ring[i].rx_buffer_info = NULL;
+- adapter->rx_ring[i].desc = NULL;
+- adapter->rx_ring[i].dma = 0;
+- adapter->rx_ring[i].count = new_rx_count;
+- err = ixgbe_setup_rx_resources(adapter,
+- &adapter->rx_ring[i]);
++ for (i = 0; i < adapter->num_rx_queues; i++) {
++ temp_ring[i].count = new_rx_count;
++ err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]);
+ if (err) {
+- adapter->rx_ring[i].rx_buffer_info = old_rx_buf;
+- adapter->rx_ring[i].desc = old_desc;
+- adapter->rx_ring[i].size = old_size;
+- adapter->rx_ring[i].dma = old_dma;
++ while (i) {
++ i--;
++ ixgbe_free_rx_resources(adapter,
++ &temp_ring[i]);
++ }
+ goto err_setup;
+ }
+-
+- vfree(old_rx_buf);
+- pci_free_consistent(adapter->pdev, old_size, old_desc,
+- old_dma);
+ }
++
++ for (i = 0; i < adapter->num_rx_queues; i++)
++ ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]);
++
++ memcpy(adapter->rx_ring, temp_ring,
++ adapter->num_rx_queues * sizeof(struct ixgbe_ring));
++
++ adapter->rx_ring_count = new_rx_count;
+ }
+
++ /* success! */
+ err = 0;
+ err_setup:
++ /* the scratch ring array is no longer needed on any path */
++ vfree(temp_ring);
+- if (netif_running(adapter->netdev))
++ if (netif_running(netdev))
+ ixgbe_up(adapter);
+
+ clear_bit(__IXGBE_RESETTING, &adapter->state);
+@@ -770,20 +771,31 @@ static int ixgbe_get_sset_count(struct n
+ }
+
+ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
+- struct ethtool_stats *stats, u64 *data)
++ struct ethtool_stats *stats, u64 *data)
+ {
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ u64 *queue_stat;
+ int stat_count = sizeof(struct ixgbe_queue_stats) / sizeof(u64);
+ int j, k;
+ int i;
++
++#ifdef CONFIG_IXGBE_LRO
+ u64 aggregated = 0, flushed = 0, no_desc = 0;
++ for (i = 0; i < adapter->num_rx_queues; i++) {
++ aggregated += adapter->rx_ring[i].lro_mgr.stats.aggregated;
++ flushed += adapter->rx_ring[i].lro_mgr.stats.flushed;
++ no_desc += adapter->rx_ring[i].lro_mgr.stats.no_desc;
++ }
++ adapter->lro_aggregated = aggregated;
++ adapter->lro_flushed = flushed;
++ adapter->lro_no_desc = no_desc;
++#endif
+
+ ixgbe_update_stats(adapter);
+ for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
+ char *p = (char *)adapter + ixgbe_gstrings_stats[i].stat_offset;
+ data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
+- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
++ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+ for (j = 0; j < adapter->num_tx_queues; j++) {
+ queue_stat = (u64 *)&adapter->tx_ring[j].stats;
+@@ -792,24 +804,18 @@ static void ixgbe_get_ethtool_stats(stru
+ i += k;
+ }
+ for (j = 0; j < adapter->num_rx_queues; j++) {
+- aggregated += adapter->rx_ring[j].lro_mgr.stats.aggregated;
+- flushed += adapter->rx_ring[j].lro_mgr.stats.flushed;
+- no_desc += adapter->rx_ring[j].lro_mgr.stats.no_desc;
+ queue_stat = (u64 *)&adapter->rx_ring[j].stats;
+ for (k = 0; k < stat_count; k++)
+ data[i + k] = queue_stat[k];
+ i += k;
+ }
+- adapter->lro_aggregated = aggregated;
+- adapter->lro_flushed = flushed;
+- adapter->lro_no_desc = no_desc;
+ }
+
+ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
+- u8 *data)
++ u8 *data)
+ {
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+- u8 *p = data;
++ char *p = (char *)data;
+ int i;
+
+ switch (stringset) {
+@@ -831,14 +837,14 @@ static void ixgbe_get_strings(struct net
+ sprintf(p, "rx_queue_%u_bytes", i);
+ p += ETH_GSTRING_LEN;
+ }
+-/* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
++ /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
+ break;
+ }
+ }
+
+
+ static void ixgbe_get_wol(struct net_device *netdev,
+- struct ethtool_wolinfo *wol)
++ struct ethtool_wolinfo *wol)
+ {
+ wol->supported = 0;
+ wol->wolopts = 0;
+@@ -859,16 +865,17 @@ static int ixgbe_nway_reset(struct net_d
+ static int ixgbe_phys_id(struct net_device *netdev, u32 data)
+ {
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+- u32 led_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_LEDCTL);
++ struct ixgbe_hw *hw = &adapter->hw;
++ u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+ u32 i;
+
+ if (!data || data > 300)
+ data = 300;
+
+ for (i = 0; i < (data * 1000); i += 400) {
+- ixgbe_led_on(&adapter->hw, IXGBE_LED_ON);
++ hw->mac.ops.led_on(hw, IXGBE_LED_ON);
+ msleep_interruptible(200);
+- ixgbe_led_off(&adapter->hw, IXGBE_LED_ON);
++ hw->mac.ops.led_off(hw, IXGBE_LED_ON);
+ msleep_interruptible(200);
+ }
+
+@@ -879,67 +886,75 @@ static int ixgbe_phys_id(struct net_devi
+ }
+
+ static int ixgbe_get_coalesce(struct net_device *netdev,
+- struct ethtool_coalesce *ec)
++ struct ethtool_coalesce *ec)
+ {
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+- if (adapter->rx_eitr < IXGBE_MIN_ITR_USECS)
+- ec->rx_coalesce_usecs = adapter->rx_eitr;
+- else
+- ec->rx_coalesce_usecs = 1000000 / adapter->rx_eitr;
+-
+- if (adapter->tx_eitr < IXGBE_MIN_ITR_USECS)
+- ec->tx_coalesce_usecs = adapter->tx_eitr;
+- else
+- ec->tx_coalesce_usecs = 1000000 / adapter->tx_eitr;
+-
+ ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0].work_limit;
++
++ /* only valid if in constant ITR mode */
++ switch (adapter->itr_setting) {
++ case 0:
++ /* throttling disabled */
++ ec->rx_coalesce_usecs = 0;
++ break;
++ case 1:
++ /* dynamic ITR mode */
++ ec->rx_coalesce_usecs = 1;
++ break;
++ default:
++ /* fixed interrupt rate mode */
++ ec->rx_coalesce_usecs = 1000000 / adapter->eitr_param;
++ break;
++ }
+ return 0;
+ }
+
+ static int ixgbe_set_coalesce(struct net_device *netdev,
+- struct ethtool_coalesce *ec)
++ struct ethtool_coalesce *ec)
+ {
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+-
+- if ((ec->rx_coalesce_usecs > IXGBE_MAX_ITR_USECS) ||
+- ((ec->rx_coalesce_usecs != 0) &&
+- (ec->rx_coalesce_usecs != 1) &&
+- (ec->rx_coalesce_usecs != 3) &&
+- (ec->rx_coalesce_usecs < IXGBE_MIN_ITR_USECS)))
+- return -EINVAL;
+- if ((ec->tx_coalesce_usecs > IXGBE_MAX_ITR_USECS) ||
+- ((ec->tx_coalesce_usecs != 0) &&
+- (ec->tx_coalesce_usecs != 1) &&
+- (ec->tx_coalesce_usecs != 3) &&
+- (ec->tx_coalesce_usecs < IXGBE_MIN_ITR_USECS)))
+- return -EINVAL;
+-
+- /* convert to rate of irq's per second */
+- if (ec->rx_coalesce_usecs < IXGBE_MIN_ITR_USECS)
+- adapter->rx_eitr = ec->rx_coalesce_usecs;
+- else
+- adapter->rx_eitr = (1000000 / ec->rx_coalesce_usecs);
+-
+- if (ec->tx_coalesce_usecs < IXGBE_MIN_ITR_USECS)
+- adapter->tx_eitr = ec->rx_coalesce_usecs;
+- else
+- adapter->tx_eitr = (1000000 / ec->tx_coalesce_usecs);
++ struct ixgbe_hw *hw = &adapter->hw;
++ int i;
+
+ if (ec->tx_max_coalesced_frames_irq)
+- adapter->tx_ring[0].work_limit =
+- ec->tx_max_coalesced_frames_irq;
++ adapter->tx_ring[0].work_limit = ec->tx_max_coalesced_frames_irq;
+
+- if (netif_running(netdev)) {
+- ixgbe_down(adapter);
+- ixgbe_up(adapter);
++ if (ec->rx_coalesce_usecs > 1) {
++ /* store the value in ints/second */
++ adapter->eitr_param = 1000000 / ec->rx_coalesce_usecs;
++
++ /* static value of interrupt rate */
++ adapter->itr_setting = adapter->eitr_param;
++ /* clear the low bit so a static value can never alias the
++ * dynamic-mode sentinel (1) */
++ adapter->itr_setting &= ~1;
++ } else if (ec->rx_coalesce_usecs == 1) {
++ /* 1 means dynamic mode */
++ adapter->eitr_param = 20000;
++ adapter->itr_setting = 1;
++ } else {
++ /* any other value means disable eitr, which is best
++ * served by setting the interrupt rate very high */
++ adapter->eitr_param = 3000000;
++ adapter->itr_setting = 0;
++ }
++
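++ /* push the new rate out to every queue vector; tx-only vectors
++ * run at half the requested rate */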
++ for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
++ struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
++ if (q_vector->txr_count && !q_vector->rxr_count)
++ q_vector->eitr = (adapter->eitr_param >> 1);
++ else
++ /* rx only or mixed */
++ q_vector->eitr = adapter->eitr_param;
++ IXGBE_WRITE_REG(hw, IXGBE_EITR(i),
++ EITR_INTS_PER_SEC_TO_REG(q_vector->eitr));
+ }
+
+ return 0;
+ }
+
+
+-static struct ethtool_ops ixgbe_ethtool_ops = {
++static const struct ethtool_ops ixgbe_ethtool_ops = {
+ .get_settings = ixgbe_get_settings,
+ .set_settings = ixgbe_set_settings,
+ .get_drvinfo = ixgbe_get_drvinfo,
+@@ -966,7 +981,7 @@ static struct ethtool_ops ixgbe_ethtool_
+ .set_tso = ixgbe_set_tso,
+ .get_strings = ixgbe_get_strings,
+ .phys_id = ixgbe_phys_id,
+- .get_sset_count = ixgbe_get_sset_count,
++ .get_sset_count = ixgbe_get_sset_count,
+ .get_ethtool_stats = ixgbe_get_ethtool_stats,
+ .get_coalesce = ixgbe_get_coalesce,
+ .set_coalesce = ixgbe_set_coalesce,
+--- a/drivers/net/ixgbe/ixgbe.h
++++ b/drivers/net/ixgbe/ixgbe.h
+@@ -1,7 +1,7 @@
+ /*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+- Copyright(c) 1999 - 2007 Intel Corporation.
++ Copyright(c) 1999 - 2008 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+@@ -20,7 +20,6 @@
+ the file called "COPYING".
+
+ Contact Information:
+- Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+@@ -32,17 +31,20 @@
+ #include <linux/types.h>
+ #include <linux/pci.h>
+ #include <linux/netdevice.h>
++
++#ifdef CONFIG_IXGBE_LRO
+ #include <linux/inet_lro.h>
++#define IXGBE_MAX_LRO_AGGREGATE 32
++#define IXGBE_MAX_LRO_DESCRIPTORS 8
++#endif
+
+ #include "ixgbe_type.h"
+ #include "ixgbe_common.h"
+
+-#ifdef CONFIG_DCA
++#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+ #include <linux/dca.h>
+ #endif
+
+-#define IXGBE_ERR(args...) printk(KERN_ERR "ixgbe: " args)
+-
+ #define PFX "ixgbe: "
+ #define DPRINTK(nlevel, klevel, fmt, args...) \
+ ((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
+@@ -58,23 +60,14 @@
+ #define IXGBE_MAX_RXD 4096
+ #define IXGBE_MIN_RXD 64
+
+-#define IXGBE_DEFAULT_RXQ 1
+-#define IXGBE_MAX_RXQ 1
+-#define IXGBE_MIN_RXQ 1
+-
+-#define IXGBE_DEFAULT_ITR_RX_USECS 125 /* 8k irqs/sec */
+-#define IXGBE_DEFAULT_ITR_TX_USECS 250 /* 4k irqs/sec */
+-#define IXGBE_MIN_ITR_USECS 100 /* 500k irqs/sec */
+-#define IXGBE_MAX_ITR_USECS 10000 /* 100 irqs/sec */
+-
+ /* flow control */
+ #define IXGBE_DEFAULT_FCRTL 0x10000
+-#define IXGBE_MIN_FCRTL 0
++#define IXGBE_MIN_FCRTL 0x40
+ #define IXGBE_MAX_FCRTL 0x7FF80
+ #define IXGBE_DEFAULT_FCRTH 0x20000
+-#define IXGBE_MIN_FCRTH 0
++#define IXGBE_MIN_FCRTH 0x600
+ #define IXGBE_MAX_FCRTH 0x7FFF0
+-#define IXGBE_DEFAULT_FCPAUSE 0x6800 /* may be too long */
++#define IXGBE_DEFAULT_FCPAUSE 0xFFFF
+ #define IXGBE_MIN_FCPAUSE 0
+ #define IXGBE_MAX_FCPAUSE 0xFFFF
+
+@@ -88,9 +81,6 @@
+
+ #define MAXIMUM_ETHERNET_VLAN_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
+
+-/* How many Tx Descriptors do we need to call netif_wake_queue? */
+-#define IXGBE_TX_QUEUE_WAKE 16
+-
+ /* How many Rx Buffers do we bundle into one write to the hardware ? */
+ #define IXGBE_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+
+@@ -101,9 +91,6 @@
+ #define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
+ #define IXGBE_TX_FLAGS_VLAN_SHIFT 16
+
+-#define IXGBE_MAX_LRO_DESCRIPTORS 8
+-#define IXGBE_MAX_LRO_AGGREGATE 32
+-
+ /* wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer */
+ struct ixgbe_tx_buffer {
+@@ -119,6 +106,7 @@ struct ixgbe_rx_buffer {
+ dma_addr_t dma;
+ struct page *page;
+ dma_addr_t page_dma;
++ unsigned int page_offset;
+ };
+
+ struct ixgbe_queue_stats {
+@@ -150,22 +138,22 @@ struct ixgbe_ring {
+ * offset associated with this ring, which is different
+ * for DCE and RSS modes */
+
+-#ifdef CONFIG_DCA
++#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+ /* cpu for tx queue */
+ int cpu;
+ #endif
++#ifdef CONFIG_IXGBE_LRO
+ struct net_lro_mgr lro_mgr;
+ bool lro_used;
++#endif
+ struct ixgbe_queue_stats stats;
+- u8 v_idx; /* maps directly to the index for this ring in the hardware
+- * vector array, can also be used for finding the bit in EICR
+- * and friends that represents the vector for this ring */
++ u16 v_idx; /* maps directly to the index for this ring in the hardware
++ * vector array, can also be used for finding the bit in EICR
++ * and friends that represents the vector for this ring */
+
+- u32 eims_value;
+- u16 itr_register;
+
+- char name[IFNAMSIZ + 5];
+ u16 work_limit; /* max work per interrupt */
++ u16 rx_buf_len;
+ };
+
+ #define RING_F_VMDQ 1
+@@ -190,8 +178,8 @@ struct ixgbe_q_vector {
+ DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */
+ u8 rxr_count; /* Rx ring count assigned to this vector */
+ u8 txr_count; /* Tx ring count assigned to this vector */
+- u8 tx_eitr;
+- u8 rx_eitr;
++ u8 tx_itr;
++ u8 rx_itr;
+ u32 eitr;
+ };
+
+@@ -228,7 +216,6 @@ struct ixgbe_adapter {
+ struct timer_list watchdog_timer;
+ struct vlan_group *vlgrp;
+ u16 bd_number;
+- u16 rx_buf_len;
+ struct work_struct reset_task;
+ struct ixgbe_q_vector q_vector[MAX_MSIX_Q_VECTORS];
+ char name[MAX_MSIX_COUNT][IFNAMSIZ + 5];
+@@ -240,7 +227,9 @@ struct ixgbe_adapter {
+
+ /* TX */
+ struct ixgbe_ring *tx_ring; /* One per active queue */
++ int num_tx_queues;
+ u64 restart_queue;
++ u64 hw_csum_tx_good;
+ u64 lsc_int;
+ u64 hw_tso_ctxt;
+ u64 hw_tso6_ctxt;
+@@ -249,12 +238,10 @@ struct ixgbe_adapter {
+
+ /* RX */
+ struct ixgbe_ring *rx_ring; /* One per active queue */
+- u64 hw_csum_tx_good;
++ int num_rx_queues;
+ u64 hw_csum_rx_error;
+ u64 hw_csum_rx_good;
+ u64 non_eop_descs;
+- int num_tx_queues;
+- int num_rx_queues;
+ int num_msix_vectors;
+ struct ixgbe_ring_feature ring_feature[3];
+ struct msix_entry *msix_entries;
+@@ -267,15 +254,28 @@ struct ixgbe_adapter {
+ * thus the additional *_CAPABLE flags.
+ */
+ u32 flags;
+-#define IXGBE_FLAG_RX_CSUM_ENABLED (u32)(1 << 0)
+-#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 1)
+-#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 2)
+-#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 3)
+-#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 4)
+-#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 5)
+-#define IXGBE_FLAG_RSS_ENABLED (u32)(1 << 6)
+-#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 7)
+-#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 8)
++#define IXGBE_FLAG_RX_CSUM_ENABLED (u32)(1)
++#define IXGBE_FLAG_MSI_CAPABLE (u32)(1 << 1)
++#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 2)
++#define IXGBE_FLAG_MSIX_CAPABLE (u32)(1 << 3)
++#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 4)
++#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 6)
++#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 7)
++#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 8)
++#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 9)
++#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 10)
++#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 11)
++#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 12)
++#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 13)
++#define IXGBE_FLAG_RSS_ENABLED (u32)(1 << 16)
++#define IXGBE_FLAG_RSS_CAPABLE (u32)(1 << 17)
++#define IXGBE_FLAG_VMDQ_CAPABLE (u32)(1 << 18)
++#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 19)
++#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 22)
++#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1 << 23)
++
++/* default to trying for four seconds */
++#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
+
+ /* OS defined structs */
+ struct net_device *netdev;
+@@ -288,14 +288,23 @@ struct ixgbe_adapter {
+ struct ixgbe_hw_stats stats;
+
+ /* Interrupt Throttle Rate */
+- u32 rx_eitr;
+- u32 tx_eitr;
++ u32 eitr_param;
+
+ unsigned long state;
+ u64 tx_busy;
++#ifdef CONFIG_IXGBE_LRO
+ u64 lro_aggregated;
+ u64 lro_flushed;
+ u64 lro_no_desc;
++#endif
++ unsigned int tx_ring_count;
++ unsigned int rx_ring_count;
++
++ u32 link_speed;
++ bool link_up;
++ unsigned long link_check_timeout;
++
++ struct work_struct watchdog_task;
+ };
+
+ enum ixbge_state_t {
+@@ -317,11 +326,11 @@ extern int ixgbe_up(struct ixgbe_adapter
+ extern void ixgbe_down(struct ixgbe_adapter *adapter);
+ extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
+ extern void ixgbe_reset(struct ixgbe_adapter *adapter);
+-extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
+ extern void ixgbe_set_ethtool_ops(struct net_device *netdev);
+-extern int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
+- struct ixgbe_ring *rxdr);
+-extern int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
+- struct ixgbe_ring *txdr);
++extern int ixgbe_setup_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
++extern int ixgbe_setup_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
++extern void ixgbe_free_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
++extern void ixgbe_free_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
++extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
+
+ #endif /* _IXGBE_H_ */
+--- a/drivers/net/ixgbe/ixgbe_main.c
++++ b/drivers/net/ixgbe/ixgbe_main.c
+@@ -1,7 +1,7 @@
+ /*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+- Copyright(c) 1999 - 2007 Intel Corporation.
++ Copyright(c) 1999 - 2008 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+@@ -20,7 +20,6 @@
+ the file called "COPYING".
+
+ Contact Information:
+- Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+@@ -46,15 +45,14 @@
+
+ char ixgbe_driver_name[] = "ixgbe";
+ static const char ixgbe_driver_string[] =
+- "Intel(R) 10 Gigabit PCI Express Network Driver";
++ "Intel(R) 10 Gigabit PCI Express Network Driver";
+
+-#define DRV_VERSION "1.3.18-k4"
++#define DRV_VERSION "1.3.30-k2"
+ const char ixgbe_driver_version[] = DRV_VERSION;
+-static const char ixgbe_copyright[] =
+- "Copyright (c) 1999-2007 Intel Corporation.";
++static char ixgbe_copyright[] = "Copyright (c) 1999-2008 Intel Corporation.";
+
+ static const struct ixgbe_info *ixgbe_info_tbl[] = {
+- [board_82598] = &ixgbe_82598_info,
++ [board_82598] = &ixgbe_82598_info,
+ };
+
+ /* ixgbe_pci_tbl - PCI Device ID Table
+@@ -74,15 +72,17 @@ static struct pci_device_id ixgbe_pci_tb
+ board_82598 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT),
+ board_82598 },
++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR),
++ board_82598 },
+
+ /* required last entry */
+ {0, }
+ };
+ MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);
+
+-#ifdef CONFIG_DCA
++#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+ static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
+- void *p);
++ void *p);
+ static struct notifier_block dca_notifier = {
+ .notifier_call = ixgbe_notify_dca,
+ .next = NULL,
+@@ -104,7 +104,7 @@ static void ixgbe_release_hw_control(str
+ /* Let firmware take over control of h/w */
+ ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
+- ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
++ ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
+ }
+
+ static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
+@@ -114,24 +114,11 @@ static void ixgbe_get_hw_control(struct
+ /* Let firmware know the driver has taken over */
+ ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
+- ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
+-}
+-
+-#ifdef DEBUG
+-/**
+- * ixgbe_get_hw_dev_name - return device name string
+- * used by hardware layer to print debugging information
+- **/
+-char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw)
+-{
+- struct ixgbe_adapter *adapter = hw->back;
+- struct net_device *netdev = adapter->netdev;
+- return netdev->name;
++ ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
+ }
+-#endif
+
+ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, u16 int_alloc_entry,
+- u8 msix_vector)
++ u8 msix_vector)
+ {
+ u32 ivar, index;
+
+@@ -144,13 +131,12 @@ static void ixgbe_set_ivar(struct ixgbe_
+ }
+
+ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
+- struct ixgbe_tx_buffer
+- *tx_buffer_info)
++ struct ixgbe_tx_buffer
++ *tx_buffer_info)
+ {
+ if (tx_buffer_info->dma) {
+- pci_unmap_page(adapter->pdev,
+- tx_buffer_info->dma,
+- tx_buffer_info->length, PCI_DMA_TODEVICE);
++ pci_unmap_page(adapter->pdev, tx_buffer_info->dma,
++ tx_buffer_info->length, PCI_DMA_TODEVICE);
+ tx_buffer_info->dma = 0;
+ }
+ if (tx_buffer_info->skb) {
+@@ -161,107 +147,120 @@ static void ixgbe_unmap_and_free_tx_reso
+ }
+
+ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
+- struct ixgbe_ring *tx_ring,
+- unsigned int eop,
+- union ixgbe_adv_tx_desc *eop_desc)
++ struct ixgbe_ring *tx_ring,
++ unsigned int eop)
+ {
++ struct ixgbe_hw *hw = &adapter->hw;
++ u32 head, tail;
++
+ /* Detect a transmit hang in hardware, this serializes the
+- * check with the clearing of time_stamp and movement of i */
++ * check with the clearing of time_stamp and movement of eop */
++ head = IXGBE_READ_REG(hw, tx_ring->head);
++ tail = IXGBE_READ_REG(hw, tx_ring->tail);
+ adapter->detect_tx_hung = false;
+- if (tx_ring->tx_buffer_info[eop].dma &&
++ if ((head != tail) &&
++ tx_ring->tx_buffer_info[eop].time_stamp &&
+ time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
+ !(IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)) {
+ /* detected Tx unit hang */
++ union ixgbe_adv_tx_desc *tx_desc;
++ tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+ DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
+- " TDH <%x>\n"
+- " TDT <%x>\n"
++ " Tx Queue <%d>\n"
++ " TDH, TDT <%x>, <%x>\n"
+ " next_to_use <%x>\n"
+ " next_to_clean <%x>\n"
+ "tx_buffer_info[next_to_clean]\n"
+ " time_stamp <%lx>\n"
+- " next_to_watch <%x>\n"
+- " jiffies <%lx>\n"
+- " next_to_watch.status <%x>\n",
+- readl(adapter->hw.hw_addr + tx_ring->head),
+- readl(adapter->hw.hw_addr + tx_ring->tail),
+- tx_ring->next_to_use,
+- tx_ring->next_to_clean,
+- tx_ring->tx_buffer_info[eop].time_stamp,
+- eop, jiffies, eop_desc->wb.status);
++ " jiffies <%lx>\n",
++ tx_ring->queue_index,
++ head, tail,
++ tx_ring->next_to_use, eop,
++ tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
+ return true;
+ }
+
+ return false;
+ }
+
+-#define IXGBE_MAX_TXD_PWR 14
+-#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
++#define IXGBE_MAX_TXD_PWR 14
++#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
+
+ /* Tx Descriptors needed, worst case */
+ #define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
+ (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
+ #define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
+- MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
++ MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
++
++#define GET_TX_HEAD_FROM_RING(ring) (\
++ *(volatile u32 *) \
++ ((union ixgbe_adv_tx_desc *)(ring)->desc + (ring)->count))
++static void ixgbe_tx_timeout(struct net_device *netdev);
+
+ /**
+ * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
+ * @adapter: board private structure
++ * @tx_ring: tx ring to clean
+ **/
+ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
+- struct ixgbe_ring *tx_ring)
++ struct ixgbe_ring *tx_ring)
+ {
+- struct net_device *netdev = adapter->netdev;
+- union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
++ union ixgbe_adv_tx_desc *tx_desc;
+ struct ixgbe_tx_buffer *tx_buffer_info;
+- unsigned int i, eop;
+- bool cleaned = false;
+- unsigned int total_tx_bytes = 0, total_tx_packets = 0;
+-
++ struct net_device *netdev = adapter->netdev;
++ struct sk_buff *skb;
++ unsigned int i;
++ u32 head, oldhead;
++ unsigned int count = 0;
++ unsigned int total_bytes = 0, total_packets = 0;
++
++ rmb();
++ head = GET_TX_HEAD_FROM_RING(tx_ring);
++ head = le32_to_cpu(head);
+ i = tx_ring->next_to_clean;
+- eop = tx_ring->tx_buffer_info[i].next_to_watch;
+- eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+- while (eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) {
+- cleaned = false;
+- while (!cleaned) {
++ while (1) {
++ while (i != head) {
+ tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+- cleaned = (i == eop);
++ skb = tx_buffer_info->skb;
+
+- tx_ring->stats.bytes += tx_buffer_info->length;
+- if (cleaned) {
+- struct sk_buff *skb = tx_buffer_info->skb;
++ if (skb) {
+ unsigned int segs, bytecount;
++
++ /* gso_segs is currently only valid for tcp */
+ segs = skb_shinfo(skb)->gso_segs ?: 1;
+ /* multiply data chunks by size of headers */
+ bytecount = ((segs - 1) * skb_headlen(skb)) +
+- skb->len;
+- total_tx_packets += segs;
+- total_tx_bytes += bytecount;
++ skb->len;
++ total_packets += segs;
++ total_bytes += bytecount;
+ }
++
+ ixgbe_unmap_and_free_tx_resource(adapter,
+- tx_buffer_info);
+- tx_desc->wb.status = 0;
++ tx_buffer_info);
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+- }
+
+- tx_ring->stats.packets++;
+-
+- eop = tx_ring->tx_buffer_info[i].next_to_watch;
+- eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+-
+- /* weight of a sort for tx, avoid endless transmit cleanup */
+- if (total_tx_packets >= tx_ring->work_limit)
+- break;
+- }
++ count++;
++ if (count == tx_ring->count)
++ goto done_cleaning;
++ }
++ oldhead = head;
++ rmb();
++ head = GET_TX_HEAD_FROM_RING(tx_ring);
++ head = le32_to_cpu(head);
++ if (head == oldhead)
++ goto done_cleaning;
++ } /* while (1) */
+
++done_cleaning:
+ tx_ring->next_to_clean = i;
+
+ #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
+- if (total_tx_packets && netif_carrier_ok(netdev) &&
+- (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
++ if (unlikely(count && netif_carrier_ok(netdev) &&
++ (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
+ /* Make sure that anybody stopping the queue after this
+ * sees the new next_to_clean.
+ */
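The rewritten cleanup loop above no longer inspects the DD bit of every
descriptor; the hardware DMAs its current head index into host memory just
past the last descriptor, and GET_TX_HEAD_FROM_RING reads it back. A minimal
sketch of the layout this assumes (the extra word is presumably reserved by
the ring setup elsewhere in this patch):

    /* ring->desc: [desc 0][desc 1] ... [desc count-1][head : u32]
     *                                                 ^
     *                           hardware write-back lands here   */
    u32 head;
    rmb();                      /* order against descriptor contents */
    head = le32_to_cpu(*(volatile u32 *)
            ((union ixgbe_adv_tx_desc *)ring->desc + ring->count));
    /* [next_to_clean, head) is complete and can be reclaimed */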
+@@ -269,59 +268,68 @@ static bool ixgbe_clean_tx_irq(struct ix
+ if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
+ !test_bit(__IXGBE_DOWN, &adapter->state)) {
+ netif_wake_subqueue(netdev, tx_ring->queue_index);
+- adapter->restart_queue++;
++ ++adapter->restart_queue;
+ }
+ }
+
+- if (adapter->detect_tx_hung)
+- if (ixgbe_check_tx_hang(adapter, tx_ring, eop, eop_desc))
+- netif_stop_subqueue(netdev, tx_ring->queue_index);
+-
+- if (total_tx_packets >= tx_ring->work_limit)
+- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->eims_value);
++ if (adapter->detect_tx_hung) {
++ if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
++ /* schedule immediate reset if we believe we hung */
++ DPRINTK(PROBE, INFO,
++ "tx hang %d detected, resetting adapter\n",
++ adapter->tx_timeout_count + 1);
++ ixgbe_tx_timeout(adapter->netdev);
++ }
++ }
+
+- tx_ring->total_bytes += total_tx_bytes;
+- tx_ring->total_packets += total_tx_packets;
+- adapter->net_stats.tx_bytes += total_tx_bytes;
+- adapter->net_stats.tx_packets += total_tx_packets;
+- cleaned = total_tx_packets ? true : false;
+- return cleaned;
++ /* re-arm the interrupt */
++ if ((total_packets >= tx_ring->work_limit) ||
++ (count == tx_ring->count))
++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->v_idx);
++
++ tx_ring->total_bytes += total_bytes;
++ tx_ring->total_packets += total_packets;
++ tx_ring->stats.bytes += total_bytes;
++ tx_ring->stats.packets += total_packets;
++ adapter->net_stats.tx_bytes += total_bytes;
++ adapter->net_stats.tx_packets += total_packets;
++ return (total_packets ? true : false);
+ }
+
+-#ifdef CONFIG_DCA
++#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
+- struct ixgbe_ring *rxr)
++ struct ixgbe_ring *rx_ring)
+ {
+ u32 rxctrl;
+ int cpu = get_cpu();
+- int q = rxr - adapter->rx_ring;
++ int q = rx_ring - adapter->rx_ring;
+
+- if (rxr->cpu != cpu) {
++ if (rx_ring->cpu != cpu) {
+ rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
+ rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
+- rxctrl |= dca_get_tag(cpu);
++ rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
+ rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
+ rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
+- rxr->cpu = cpu;
++ rx_ring->cpu = cpu;
+ }
+ put_cpu();
+ }
+
+ static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
+- struct ixgbe_ring *txr)
++ struct ixgbe_ring *tx_ring)
+ {
+ u32 txctrl;
+ int cpu = get_cpu();
+- int q = txr - adapter->tx_ring;
++ int q = tx_ring - adapter->tx_ring;
+
+- if (txr->cpu != cpu) {
++ if (tx_ring->cpu != cpu) {
+ txctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q));
+ txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
+- txctrl |= dca_get_tag(cpu);
++ txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
+ txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q), txctrl);
+- txr->cpu = cpu;
++ tx_ring->cpu = cpu;
+ }
+ put_cpu();
+ }
+@@ -351,11 +359,14 @@ static int __ixgbe_notify_dca(struct dev
+
+ switch (event) {
+ case DCA_PROVIDER_ADD:
+- adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
++ /* if we're already enabled, don't do it again */
++ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
++ break;
+ /* Always use CB2 mode, difference is masked
+ * in the CB driver. */
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
+ if (dca_add_requester(dev) == 0) {
++ adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
+ ixgbe_setup_dca(adapter);
+ break;
+ }
+@@ -372,7 +383,7 @@ static int __ixgbe_notify_dca(struct dev
+ return 0;
+ }
+
+-#endif /* CONFIG_DCA */
++#endif /* CONFIG_DCA or CONFIG_DCA_MODULE */
+ /**
+ * ixgbe_receive_skb - Send a completed packet up the stack
+ * @adapter: board private structure
+@@ -382,13 +393,14 @@ static int __ixgbe_notify_dca(struct dev
+ * @rx_desc: rx descriptor
+ **/
+ static void ixgbe_receive_skb(struct ixgbe_adapter *adapter,
+- struct sk_buff *skb, u8 status,
+- struct ixgbe_ring *ring,
++ struct sk_buff *skb, u8 status,
++ struct ixgbe_ring *ring,
+ union ixgbe_adv_rx_desc *rx_desc)
+ {
+ bool is_vlan = (status & IXGBE_RXD_STAT_VP);
+ u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
+
++#ifdef CONFIG_IXGBE_LRO
+ if (adapter->netdev->features & NETIF_F_LRO &&
+ skb->ip_summed == CHECKSUM_UNNECESSARY) {
+ if (adapter->vlgrp && is_vlan)
+@@ -399,6 +411,7 @@ static void ixgbe_receive_skb(struct ixg
+ lro_receive_skb(&ring->lro_mgr, skb, rx_desc);
+ ring->lro_used = true;
+ } else {
++#endif
+ if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
+ if (adapter->vlgrp && is_vlan)
+ vlan_hwaccel_receive_skb(skb, adapter->vlgrp, tag);
+@@ -410,7 +423,9 @@ static void ixgbe_receive_skb(struct ixg
+ else
+ netif_rx(skb);
+ }
++#ifdef CONFIG_IXGBE_LRO
+ }
++#endif
+ }
+
+ /**
+@@ -420,14 +435,12 @@ static void ixgbe_receive_skb(struct ixg
+ * @skb: skb currently being received and modified
+ **/
+ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
+- u32 status_err,
+- struct sk_buff *skb)
++ u32 status_err, struct sk_buff *skb)
+ {
+ skb->ip_summed = CHECKSUM_NONE;
+
+- /* Ignore Checksum bit is set, or rx csum disabled */
+- if ((status_err & IXGBE_RXD_STAT_IXSM) ||
+- !(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
++ /* Rx csum disabled */
++ if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
+ return;
+
+ /* if IP and error */
+@@ -455,37 +468,44 @@ static inline void ixgbe_rx_checksum(str
+ * @adapter: address of board private structure
+ **/
+ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
+- struct ixgbe_ring *rx_ring,
+- int cleaned_count)
++ struct ixgbe_ring *rx_ring,
++ int cleaned_count)
+ {
+- struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ union ixgbe_adv_rx_desc *rx_desc;
+- struct ixgbe_rx_buffer *rx_buffer_info;
+- struct sk_buff *skb;
++ struct ixgbe_rx_buffer *bi;
+ unsigned int i;
+- unsigned int bufsz = adapter->rx_buf_len + NET_IP_ALIGN;
++ unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;
+
+ i = rx_ring->next_to_use;
+- rx_buffer_info = &rx_ring->rx_buffer_info[i];
++ bi = &rx_ring->rx_buffer_info[i];
+
+ while (cleaned_count--) {
+ rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
+
+- if (!rx_buffer_info->page &&
+- (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
+- rx_buffer_info->page = alloc_page(GFP_ATOMIC);
+- if (!rx_buffer_info->page) {
+- adapter->alloc_rx_page_failed++;
+- goto no_buffers;
++ if (!bi->page_dma &&
++ (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
++ if (!bi->page) {
++ bi->page = alloc_page(GFP_ATOMIC);
++ if (!bi->page) {
++ adapter->alloc_rx_page_failed++;
++ goto no_buffers;
++ }
++ bi->page_offset = 0;
++ } else {
++ /* use a half page if we're re-using */
++ bi->page_offset ^= (PAGE_SIZE / 2);
+ }
+- rx_buffer_info->page_dma =
+- pci_map_page(pdev, rx_buffer_info->page,
+- 0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
++
++ bi->page_dma = pci_map_page(pdev, bi->page,
++ bi->page_offset,
++ (PAGE_SIZE / 2),
++ PCI_DMA_FROMDEVICE);
+ }
+
+- if (!rx_buffer_info->skb) {
+- skb = netdev_alloc_skb(netdev, bufsz);
++ if (!bi->skb) {
++ struct sk_buff *skb = netdev_alloc_skb(adapter->netdev,
++ bufsz);
+
+ if (!skb) {
+ adapter->alloc_rx_buff_failed++;
+@@ -499,28 +519,25 @@ static void ixgbe_alloc_rx_buffers(struc
+ */
+ skb_reserve(skb, NET_IP_ALIGN);
+
+- rx_buffer_info->skb = skb;
+- rx_buffer_info->dma = pci_map_single(pdev, skb->data,
+- bufsz,
+- PCI_DMA_FROMDEVICE);
++ bi->skb = skb;
++ bi->dma = pci_map_single(pdev, skb->data, bufsz,
++ PCI_DMA_FROMDEVICE);
+ }
+ /* Refresh the desc even if buffer_addrs didn't change because
+ * each write-back erases this info. */
+ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+- rx_desc->read.pkt_addr =
+- cpu_to_le64(rx_buffer_info->page_dma);
+- rx_desc->read.hdr_addr =
+- cpu_to_le64(rx_buffer_info->dma);
++ rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
++ rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
+ } else {
+- rx_desc->read.pkt_addr =
+- cpu_to_le64(rx_buffer_info->dma);
++ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+ }
+
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
+- rx_buffer_info = &rx_ring->rx_buffer_info[i];
++ bi = &rx_ring->rx_buffer_info[i];
+ }
++
+ no_buffers:
+ if (rx_ring->next_to_use != i) {
+ rx_ring->next_to_use = i;
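The refill path now hands the NIC half a page at a time: on first use the
page is mapped at offset 0, and on reuse page_offset is XORed over to the
other 2 KB half. The completion path (later in this patch) decides whether
reuse is safe; a condensed sketch of that decision, assuming 4 KB pages:

    /* reuse only if a full receive buffer fits in half a page and
     * the stack has dropped every reference to the previous half */
    if (rx_ring->rx_buf_len > (PAGE_SIZE / 2) ||
        page_count(page) != 1)
            bi->page = NULL;       /* refill allocates a fresh page */
    else
            get_page(page);        /* keep it; flip to other half   */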
+@@ -538,46 +555,54 @@ no_buffers:
+ }
+ }
+
++static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
++{
++ return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
++}
++
++static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
++{
++ return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
++}
++
+ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
+- struct ixgbe_ring *rx_ring,
+- int *work_done, int work_to_do)
++ struct ixgbe_ring *rx_ring,
++ int *work_done, int work_to_do)
+ {
+- struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
+ struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
+ struct sk_buff *skb;
+ unsigned int i;
+- u32 upper_len, len, staterr;
++ u32 len, staterr;
+ u16 hdr_info;
+ bool cleaned = false;
+ int cleaned_count = 0;
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+
+ i = rx_ring->next_to_clean;
+- upper_len = 0;
+ rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ rx_buffer_info = &rx_ring->rx_buffer_info[i];
+
+ while (staterr & IXGBE_RXD_STAT_DD) {
++ u32 upper_len = 0;
+ if (*work_done >= work_to_do)
+ break;
+ (*work_done)++;
+
+ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+- hdr_info =
+- le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info);
+- len =
+- ((hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
+- IXGBE_RXDADV_HDRBUFLEN_SHIFT);
++ hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
++ len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
++ IXGBE_RXDADV_HDRBUFLEN_SHIFT;
+ if (hdr_info & IXGBE_RXDADV_SPH)
+ adapter->rx_hdr_split++;
+ if (len > IXGBE_RX_HDR_SIZE)
+ len = IXGBE_RX_HDR_SIZE;
+ upper_len = le16_to_cpu(rx_desc->wb.upper.length);
+- } else
++ } else {
+ len = le16_to_cpu(rx_desc->wb.upper.length);
++ }
+
+ cleaned = true;
+ skb = rx_buffer_info->skb;
+@@ -586,18 +611,25 @@ static bool ixgbe_clean_rx_irq(struct ix
+
+ if (len && !skb_shinfo(skb)->nr_frags) {
+ pci_unmap_single(pdev, rx_buffer_info->dma,
+- adapter->rx_buf_len + NET_IP_ALIGN,
+- PCI_DMA_FROMDEVICE);
++ rx_ring->rx_buf_len + NET_IP_ALIGN,
++ PCI_DMA_FROMDEVICE);
+ skb_put(skb, len);
+ }
+
+ if (upper_len) {
+ pci_unmap_page(pdev, rx_buffer_info->page_dma,
+- PAGE_SIZE, PCI_DMA_FROMDEVICE);
++ PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
+ rx_buffer_info->page_dma = 0;
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+- rx_buffer_info->page, 0, upper_len);
+- rx_buffer_info->page = NULL;
++ rx_buffer_info->page,
++ rx_buffer_info->page_offset,
++ upper_len);
++
++ if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
++ (page_count(rx_buffer_info->page) != 1))
++ rx_buffer_info->page = NULL;
++ else
++ get_page(rx_buffer_info->page);
+
+ skb->len += upper_len;
+ skb->data_len += upper_len;
+@@ -620,6 +652,7 @@ static bool ixgbe_clean_rx_irq(struct ix
+ rx_buffer_info->skb = next_buffer->skb;
+ rx_buffer_info->dma = next_buffer->dma;
+ next_buffer->skb = skb;
++ next_buffer->dma = 0;
+ adapter->non_eop_descs++;
+ goto next_desc;
+ }
+@@ -635,9 +668,9 @@ static bool ixgbe_clean_rx_irq(struct ix
+ total_rx_bytes += skb->len;
+ total_rx_packets++;
+
+- skb->protocol = eth_type_trans(skb, netdev);
++ skb->protocol = eth_type_trans(skb, adapter->netdev);
+ ixgbe_receive_skb(adapter, skb, staterr, rx_ring, rx_desc);
+- netdev->last_rx = jiffies;
++ adapter->netdev->last_rx = jiffies;
+
+ next_desc:
+ rx_desc->wb.upper.status_error = 0;
+@@ -655,10 +688,12 @@ next_desc:
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ }
+
++#ifdef CONFIG_IXGBE_LRO
+ if (rx_ring->lro_used) {
+ lro_flush_all(&rx_ring->lro_mgr);
+ rx_ring->lro_used = false;
+ }
++#endif
+
+ rx_ring->next_to_clean = i;
+ cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
+@@ -666,9 +701,6 @@ next_desc:
+ if (cleaned_count)
+ ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
+
+- adapter->net_stats.rx_bytes += total_rx_bytes;
+- adapter->net_stats.rx_packets += total_rx_packets;
+-
+ rx_ring->total_packets += total_rx_packets;
+ rx_ring->total_bytes += total_rx_bytes;
+ adapter->net_stats.rx_bytes += total_rx_bytes;
+@@ -700,43 +732,43 @@ static void ixgbe_configure_msix(struct
+ q_vector = &adapter->q_vector[v_idx];
+ /* XXX for_each_bit(...) */
+ r_idx = find_first_bit(q_vector->rxr_idx,
+- adapter->num_rx_queues);
++ adapter->num_rx_queues);
+
+ for (i = 0; i < q_vector->rxr_count; i++) {
+ j = adapter->rx_ring[r_idx].reg_idx;
+ ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(j), v_idx);
+ r_idx = find_next_bit(q_vector->rxr_idx,
+- adapter->num_rx_queues,
+- r_idx + 1);
++ adapter->num_rx_queues,
++ r_idx + 1);
+ }
+ r_idx = find_first_bit(q_vector->txr_idx,
+- adapter->num_tx_queues);
++ adapter->num_tx_queues);
+
+ for (i = 0; i < q_vector->txr_count; i++) {
+ j = adapter->tx_ring[r_idx].reg_idx;
+ ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(j), v_idx);
+ r_idx = find_next_bit(q_vector->txr_idx,
+- adapter->num_tx_queues,
+- r_idx + 1);
++ adapter->num_tx_queues,
++ r_idx + 1);
+ }
+
+- /* if this is a tx only vector use half the irq (tx) rate */
++ /* if this is a tx only vector halve the interrupt rate */
+ if (q_vector->txr_count && !q_vector->rxr_count)
+- q_vector->eitr = adapter->tx_eitr;
++ q_vector->eitr = (adapter->eitr_param >> 1);
+ else
+- /* rx only or mixed */
+- q_vector->eitr = adapter->rx_eitr;
++ /* rx only */
++ q_vector->eitr = adapter->eitr_param;
+
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx),
+- EITR_INTS_PER_SEC_TO_REG(q_vector->eitr));
++ EITR_INTS_PER_SEC_TO_REG(q_vector->eitr));
+ }
+
+ ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX, v_idx);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
+
+- /* set up to autoclear timer, lsc, and the vectors */
++ /* set up to autoclear timer, and the vectors */
+ mask = IXGBE_EIMS_ENABLE_MASK;
+- mask &= ~IXGBE_EIMS_OTHER;
++ mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
+ }
+
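Splitting the EITR programming by vector type trades latency for fewer
interrupts only where that is cheap. A worked example of the assignment
above (8000 ints/sec is a hypothetical eitr_param value):

    /* tx-only vectors tolerate batching; rx latency matters more */
    q_vector->eitr = (q_vector->txr_count && !q_vector->rxr_count)
                         ? (adapter->eitr_param >> 1)  /* 8000 -> 4000 */
                         : adapter->eitr_param;        /* 8000         */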
+@@ -766,8 +798,8 @@ enum latency_range {
+ * parameter (see ixgbe_param.c)
+ **/
+ static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
+- u32 eitr, u8 itr_setting,
+- int packets, int bytes)
++ u32 eitr, u8 itr_setting,
++ int packets, int bytes)
+ {
+ unsigned int retval = itr_setting;
+ u32 timepassed_us;
+@@ -814,40 +846,40 @@ static void ixgbe_set_itr_msix(struct ix
+ u32 new_itr;
+ u8 current_itr, ret_itr;
+ int i, r_idx, v_idx = ((void *)q_vector - (void *)(adapter->q_vector)) /
+- sizeof(struct ixgbe_q_vector);
++ sizeof(struct ixgbe_q_vector);
+ struct ixgbe_ring *rx_ring, *tx_ring;
+
+ r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+ for (i = 0; i < q_vector->txr_count; i++) {
+ tx_ring = &(adapter->tx_ring[r_idx]);
+ ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
+- q_vector->tx_eitr,
+- tx_ring->total_packets,
+- tx_ring->total_bytes);
++ q_vector->tx_itr,
++ tx_ring->total_packets,
++ tx_ring->total_bytes);
+ /* if the result for this queue would decrease interrupt
+ * rate for this vector then use that result */
+- q_vector->tx_eitr = ((q_vector->tx_eitr > ret_itr) ?
+- q_vector->tx_eitr - 1 : ret_itr);
++ q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
++ q_vector->tx_itr - 1 : ret_itr);
+ r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+- r_idx + 1);
++ r_idx + 1);
+ }
+
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ for (i = 0; i < q_vector->rxr_count; i++) {
+ rx_ring = &(adapter->rx_ring[r_idx]);
+ ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
+- q_vector->rx_eitr,
+- rx_ring->total_packets,
+- rx_ring->total_bytes);
++ q_vector->rx_itr,
++ rx_ring->total_packets,
++ rx_ring->total_bytes);
+ /* if the result for this queue would decrease interrupt
+ * rate for this vector then use that result */
+- q_vector->rx_eitr = ((q_vector->rx_eitr > ret_itr) ?
+- q_vector->rx_eitr - 1 : ret_itr);
++ q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
++ q_vector->rx_itr - 1 : ret_itr);
+ r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+- r_idx + 1);
++ r_idx + 1);
+ }
+
+- current_itr = max(q_vector->rx_eitr, q_vector->tx_eitr);
++ current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
+
+ switch (current_itr) {
+ /* counts and packets in update_itr are dependent on these numbers */
+@@ -871,13 +903,27 @@ static void ixgbe_set_itr_msix(struct ix
+ itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
+ /* must write high and low 16 bits to reset counter */
+ DPRINTK(TX_ERR, DEBUG, "writing eitr(%d): %08X\n", v_idx,
+- itr_reg);
++ itr_reg);
+ IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg | (itr_reg)<<16);
+ }
+
+ return;
+ }
+
++
++static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
++{
++ struct ixgbe_hw *hw = &adapter->hw;
++
++ adapter->lsc_int++;
++ adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
++ adapter->link_check_timeout = jiffies;
++ if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
++ IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
++ schedule_work(&adapter->watchdog_task);
++ }
++}
++
+ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
+ {
+ struct net_device *netdev = data;
+@@ -885,11 +931,8 @@ static irqreturn_t ixgbe_msix_lsc(int ir
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
+
+- if (eicr & IXGBE_EICR_LSC) {
+- adapter->lsc_int++;
+- if (!test_bit(__IXGBE_DOWN, &adapter->state))
+- mod_timer(&adapter->watchdog_timer, jiffies);
+- }
++ if (eicr & IXGBE_EICR_LSC)
++ ixgbe_check_lsc(adapter);
+
+ if (!test_bit(__IXGBE_DOWN, &adapter->state))
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
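ixgbe_check_lsc() moves link handling out of hard-IRQ context: it masks
further LSC causes and punts to the watchdog task, which may sleep while
querying the PHY. A sketch of the handshake (the re-enable side is assumed
to live in the watchdog task, which is not shown in this hunk):

    /* hard irq: cheap and non-blocking */
    IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);   /* mask LSC */
    schedule_work(&adapter->watchdog_task);            /* defer    */

    /* watchdog task, process context: poll link, set carrier,
     * then re-arm by writing IXGBE_EIMS_LSC back into EIMS */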
+@@ -901,7 +944,7 @@ static irqreturn_t ixgbe_msix_clean_tx(i
+ {
+ struct ixgbe_q_vector *q_vector = data;
+ struct ixgbe_adapter *adapter = q_vector->adapter;
+- struct ixgbe_ring *txr;
++ struct ixgbe_ring *tx_ring;
+ int i, r_idx;
+
+ if (!q_vector->txr_count)
+@@ -909,16 +952,16 @@ static irqreturn_t ixgbe_msix_clean_tx(i
+
+ r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+ for (i = 0; i < q_vector->txr_count; i++) {
+- txr = &(adapter->tx_ring[r_idx]);
+-#ifdef CONFIG_DCA
++ tx_ring = &(adapter->tx_ring[r_idx]);
++#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+- ixgbe_update_tx_dca(adapter, txr);
++ ixgbe_update_tx_dca(adapter, tx_ring);
+ #endif
+- txr->total_bytes = 0;
+- txr->total_packets = 0;
+- ixgbe_clean_tx_irq(adapter, txr);
++ tx_ring->total_bytes = 0;
++ tx_ring->total_packets = 0;
++ ixgbe_clean_tx_irq(adapter, tx_ring);
+ r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+- r_idx + 1);
++ r_idx + 1);
+ }
+
+ return IRQ_HANDLED;
+@@ -933,18 +976,26 @@ static irqreturn_t ixgbe_msix_clean_rx(i
+ {
+ struct ixgbe_q_vector *q_vector = data;
+ struct ixgbe_adapter *adapter = q_vector->adapter;
+- struct ixgbe_ring *rxr;
++ struct ixgbe_ring *rx_ring;
+ int r_idx;
++ int i;
+
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
++ for (i = 0; i < q_vector->rxr_count; i++) {
++ rx_ring = &(adapter->rx_ring[r_idx]);
++ rx_ring->total_bytes = 0;
++ rx_ring->total_packets = 0;
++ r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
++ r_idx + 1);
++ }
++
+ if (!q_vector->rxr_count)
+ return IRQ_HANDLED;
+
+- rxr = &(adapter->rx_ring[r_idx]);
++ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
++ rx_ring = &(adapter->rx_ring[r_idx]);
+ /* disable interrupts on this vector only */
+- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rxr->v_idx);
+- rxr->total_bytes = 0;
+- rxr->total_packets = 0;
++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rx_ring->v_idx);
+ netif_rx_schedule(adapter->netdev, &q_vector->napi);
+
+ return IRQ_HANDLED;
+@@ -963,39 +1014,90 @@ static irqreturn_t ixgbe_msix_clean_many
+ * @napi: napi struct with our devices info in it
+ * @budget: amount of work driver is allowed to do this pass, in packets
+ *
++ * This function is optimized for cleaning one queue only on a single
++ * q_vector!!!
+ **/
+ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
+ {
+ struct ixgbe_q_vector *q_vector =
+- container_of(napi, struct ixgbe_q_vector, napi);
++ container_of(napi, struct ixgbe_q_vector, napi);
+ struct ixgbe_adapter *adapter = q_vector->adapter;
+- struct ixgbe_ring *rxr;
++ struct ixgbe_ring *rx_ring = NULL;
+ int work_done = 0;
+ long r_idx;
+
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+- rxr = &(adapter->rx_ring[r_idx]);
+-#ifdef CONFIG_DCA
++ rx_ring = &(adapter->rx_ring[r_idx]);
++#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+- ixgbe_update_rx_dca(adapter, rxr);
++ ixgbe_update_rx_dca(adapter, rx_ring);
+ #endif
+
+- ixgbe_clean_rx_irq(adapter, rxr, &work_done, budget);
++ ixgbe_clean_rx_irq(adapter, rx_ring, &work_done, budget);
+
+ /* If all Rx work done, exit the polling mode */
+ if (work_done < budget) {
+ netif_rx_complete(adapter->netdev, napi);
+- if (adapter->rx_eitr < IXGBE_MIN_ITR_USECS)
++ if (adapter->itr_setting & 3)
+ ixgbe_set_itr_msix(q_vector);
+ if (!test_bit(__IXGBE_DOWN, &adapter->state))
+- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rxr->v_idx);
++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rx_ring->v_idx);
+ }
+
+ return work_done;
+ }
+
++/**
++ * ixgbe_clean_rxonly_many - msix (aka one shot) rx clean routine
++ * @napi: napi struct with our devices info in it
++ * @budget: amount of work driver is allowed to do this pass, in packets
++ *
++ * This function will clean more than one rx queue associated with a
++ * q_vector.
++ **/
++static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget)
++{
++ struct ixgbe_q_vector *q_vector =
++ container_of(napi, struct ixgbe_q_vector, napi);
++ struct ixgbe_adapter *adapter = q_vector->adapter;
++ struct ixgbe_ring *rx_ring = NULL;
++ int work_done = 0, i;
++ long r_idx;
++ u16 enable_mask = 0;
++
++ /* attempt to distribute budget to each queue fairly, but don't allow
++ * the budget to go below 1 because we'll exit polling */
++ budget /= (q_vector->rxr_count ?: 1);
++ budget = max(budget, 1);
++ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
++ for (i = 0; i < q_vector->rxr_count; i++) {
++ rx_ring = &(adapter->rx_ring[r_idx]);
++#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
++ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
++ ixgbe_update_rx_dca(adapter, rx_ring);
++#endif
++ ixgbe_clean_rx_irq(adapter, rx_ring, &work_done, budget);
++ enable_mask |= rx_ring->v_idx;
++ r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
++ r_idx + 1);
++ }
++
++ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
++ rx_ring = &(adapter->rx_ring[r_idx]);
++ /* If all Rx work done, exit the polling mode */
++ if (work_done < budget) {
++ netif_rx_complete(adapter->netdev, napi);
++ if (adapter->itr_setting & 3)
++ ixgbe_set_itr_msix(q_vector);
++ if (!test_bit(__IXGBE_DOWN, &adapter->state))
++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, enable_mask);
++ return 0;
++ }
++
++ return work_done;
++}
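ixgbe_clean_rxonly_many() splits the NAPI budget evenly so one busy ring
cannot starve its siblings on the same vector. A worked example of the
arithmetic:

    /* budget = 64, rxr_count = 3:
     *   64 / 3 = 21 per ring (integer division, floored at 1),
     * so work_done stays <= 63 and the work_done < budget test can
     * still complete polling when all rings run dry */
    budget /= (q_vector->rxr_count ?: 1);
    budget = max(budget, 1);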
+ static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
+- int r_idx)
++ int r_idx)
+ {
+ a->q_vector[v_idx].adapter = a;
+ set_bit(r_idx, a->q_vector[v_idx].rxr_idx);
+@@ -1004,7 +1106,7 @@ static inline void map_vector_to_rxq(str
+ }
+
+ static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
+- int r_idx)
++ int r_idx)
+ {
+ a->q_vector[v_idx].adapter = a;
+ set_bit(r_idx, a->q_vector[v_idx].txr_idx);
+@@ -1024,7 +1126,7 @@ static inline void map_vector_to_txq(str
+ * mapping configurations in here.
+ **/
+ static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
+- int vectors)
++ int vectors)
+ {
+ int v_start = 0;
+ int rxr_idx = 0, txr_idx = 0;
+@@ -1101,28 +1203,28 @@ static int ixgbe_request_msix_irqs(struc
+ goto out;
+
+ #define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
+- (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
+- &ixgbe_msix_clean_many)
++ (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
++ &ixgbe_msix_clean_many)
+ for (vector = 0; vector < q_vectors; vector++) {
+ handler = SET_HANDLER(&adapter->q_vector[vector]);
+ sprintf(adapter->name[vector], "%s:v%d-%s",
+- netdev->name, vector,
+- (handler == &ixgbe_msix_clean_rx) ? "Rx" :
+- ((handler == &ixgbe_msix_clean_tx) ? "Tx" : "TxRx"));
++ netdev->name, vector,
++ (handler == &ixgbe_msix_clean_rx) ? "Rx" :
++ ((handler == &ixgbe_msix_clean_tx) ? "Tx" : "TxRx"));
+ err = request_irq(adapter->msix_entries[vector].vector,
+- handler, 0, adapter->name[vector],
+- &(adapter->q_vector[vector]));
++ handler, 0, adapter->name[vector],
++ &(adapter->q_vector[vector]));
+ if (err) {
+ DPRINTK(PROBE, ERR,
+- "request_irq failed for MSIX interrupt "
+- "Error: %d\n", err);
++ "request_irq failed for MSIX interrupt "
++ "Error: %d\n", err);
+ goto free_queue_irqs;
+ }
+ }
+
+ sprintf(adapter->name[vector], "%s:lsc", netdev->name);
+ err = request_irq(adapter->msix_entries[vector].vector,
+- &ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
++ &ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
+ if (err) {
+ DPRINTK(PROBE, ERR,
+ "request_irq for msix_lsc failed: %d\n", err);
+@@ -1134,7 +1236,7 @@ static int ixgbe_request_msix_irqs(struc
+ free_queue_irqs:
+ for (i = vector - 1; i >= 0; i--)
+ free_irq(adapter->msix_entries[--vector].vector,
+- &(adapter->q_vector[i]));
++ &(adapter->q_vector[i]));
+ adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
+ pci_disable_msix(adapter->pdev);
+ kfree(adapter->msix_entries);
+@@ -1152,16 +1254,16 @@ static void ixgbe_set_itr(struct ixgbe_a
+ struct ixgbe_ring *rx_ring = &adapter->rx_ring[0];
+ struct ixgbe_ring *tx_ring = &adapter->tx_ring[0];
+
+- q_vector->tx_eitr = ixgbe_update_itr(adapter, new_itr,
+- q_vector->tx_eitr,
+- tx_ring->total_packets,
+- tx_ring->total_bytes);
+- q_vector->rx_eitr = ixgbe_update_itr(adapter, new_itr,
+- q_vector->rx_eitr,
+- rx_ring->total_packets,
+- rx_ring->total_bytes);
++ q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
++ q_vector->tx_itr,
++ tx_ring->total_packets,
++ tx_ring->total_bytes);
++ q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr,
++ q_vector->rx_itr,
++ rx_ring->total_packets,
++ rx_ring->total_bytes);
+
+- current_itr = max(q_vector->rx_eitr, q_vector->tx_eitr);
++ current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
+
+ switch (current_itr) {
+ /* counts and packets in update_itr are dependent on these numbers */
+@@ -1206,19 +1308,19 @@ static irqreturn_t ixgbe_intr(int irq, v
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 eicr;
+
+-
+ /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read
+ * therefore no explict interrupt disable is necessary */
+ eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
+- if (!eicr)
++ if (!eicr) {
++ /* shared interrupt alert!
++ * make sure interrupts are enabled because the read will
++ * have disabled interrupts due to EIAM */
++ ixgbe_irq_enable(adapter);
+ return IRQ_NONE; /* Not our interrupt */
+-
+- if (eicr & IXGBE_EICR_LSC) {
+- adapter->lsc_int++;
+- if (!test_bit(__IXGBE_DOWN, &adapter->state))
+- mod_timer(&adapter->watchdog_timer, jiffies);
+ }
+
++ if (eicr & IXGBE_EICR_LSC)
++ ixgbe_check_lsc(adapter);
+
+ if (netif_rx_schedule_prep(netdev, &adapter->q_vector[0].napi)) {
+ adapter->tx_ring[0].total_packets = 0;
+@@ -1261,10 +1363,10 @@ static int ixgbe_request_irq(struct ixgb
+ err = ixgbe_request_msix_irqs(adapter);
+ } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
+ err = request_irq(adapter->pdev->irq, &ixgbe_intr, 0,
+- netdev->name, netdev);
++ netdev->name, netdev);
+ } else {
+ err = request_irq(adapter->pdev->irq, &ixgbe_intr, IRQF_SHARED,
+- netdev->name, netdev);
++ netdev->name, netdev);
+ }
+
+ if (err)
+@@ -1288,7 +1390,7 @@ static void ixgbe_free_irq(struct ixgbe_
+ i--;
+ for (; i >= 0; i--) {
+ free_irq(adapter->msix_entries[i].vector,
+- &(adapter->q_vector[i]));
++ &(adapter->q_vector[i]));
+ }
+
+ ixgbe_reset_q_vectors(adapter);
+@@ -1335,7 +1437,7 @@ static void ixgbe_configure_msi_and_lega
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
+- EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr));
++ EITR_INTS_PER_SEC_TO_REG(adapter->eitr_param));
+
+ ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(0), 0);
+ ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(0), 0);
+@@ -1347,26 +1449,31 @@ static void ixgbe_configure_msi_and_lega
+ }
+
+ /**
+- * ixgbe_configure_tx - Configure 8254x Transmit Unit after Reset
++ * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
+ {
+- u64 tdba;
++ u64 tdba, tdwba;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 i, j, tdlen, txctrl;
+
+ /* Setup the HW Tx Head and Tail descriptor pointers */
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+- j = adapter->tx_ring[i].reg_idx;
+- tdba = adapter->tx_ring[i].dma;
+- tdlen = adapter->tx_ring[i].count *
+- sizeof(union ixgbe_adv_tx_desc);
++ struct ixgbe_ring *ring = &adapter->tx_ring[i];
++ j = ring->reg_idx;
++ tdba = ring->dma;
++ tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
+ IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
+- (tdba & DMA_32BIT_MASK));
++ (tdba & DMA_32BIT_MASK));
+ IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
++ tdwba = ring->dma +
++ (ring->count * sizeof(union ixgbe_adv_tx_desc));
++ tdwba |= IXGBE_TDWBAL_HEAD_WB_ENABLE;
++ IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(j), tdwba & DMA_32BIT_MASK);
++ IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(j), (tdwba >> 32));
+ IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen);
+ IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
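TDWBAL/TDWBAH point head write-back at the u32 immediately after the
descriptors, the same word GET_TX_HEAD_FROM_RING reads in the cleanup path,
with bit 0 of the low dword serving as the enable. For this to be safe the
ring allocation has to cover the extra word; a sketch of the sizing that
implies (the ring->size field name is an assumption):

    /* descriptors plus one u32 head write-back slot */
    ring->size = ring->count * sizeof(union ixgbe_adv_tx_desc)
                 + sizeof(u32);
    ring->desc = pci_alloc_consistent(pdev, ring->size, &ring->dma);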
+@@ -1375,20 +1482,59 @@ static void ixgbe_configure_tx(struct ix
+ /* Disable Tx Head Writeback RO bit, since this hoses
+ * bookkeeping if things aren't delivered in order.
+ */
+- txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
++ txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
+ txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
++ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
+ }
+ }
+
+-#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
+- (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
++#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
++
++static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index)
++{
++ struct ixgbe_ring *rx_ring;
++ u32 srrctl;
++ int queue0;
++ unsigned long mask;
++
++ /* we must program one srrctl register per RSS queue since we
++ * have enabled RDRXCTL.MVMEN
++ */
++ mask = (unsigned long)adapter->ring_feature[RING_F_RSS].mask;
++ queue0 = index & mask;
++ index = index & mask;
++
++ rx_ring = &adapter->rx_ring[queue0];
++
++ srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index));
++
++ srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
++ srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
++
++ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
++ srrctl |= IXGBE_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
++ srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
++ srrctl |= ((IXGBE_RX_HDR_SIZE <<
++ IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
++ IXGBE_SRRCTL_BSIZEHDR_MASK);
++ } else {
++ srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
++
++ if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
++ srrctl |= IXGBE_RXBUFFER_2048 >>
++ IXGBE_SRRCTL_BSIZEPKT_SHIFT;
++ else
++ srrctl |= rx_ring->rx_buf_len >>
++ IXGBE_SRRCTL_BSIZEPKT_SHIFT;
++ }
++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl);
++}
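The buffer-size fields in SRRCTL are not byte counts: BSIZEPKT is in 1 KB
units, and BSIZEHDR holds 64-byte units starting at bit 8, which is why the
plain left shift of 2 works: (bytes / 64) << 8 == bytes << 2. A worked
example, assuming that field layout:

    /* 2 KB packet buffer: 2048 >> 10 (BSIZEPKT units of 1 KB) = 2 */
    u32 srrctl = IXGBE_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
    /* 256 B header buffer: 256 << 2 = 0x400, i.e. field value 4
     * at bit 8, and 4 * 64 = 256 bytes */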
+
+-#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
++#ifdef CONFIG_IXGBE_LRO
+ /**
+ * ixgbe_get_skb_hdr - helper function for LRO header processing
+ * @skb: pointer to sk_buff to be added to LRO packet
+- * @iphdr: pointer to tcp header structure
++ * @iphdr: pointer to ip header structure
+ * @tcph: pointer to tcp header structure
+ * @hdr_flags: pointer to header flags
+ * @priv: private data
+@@ -1399,8 +1545,8 @@ static int ixgbe_get_skb_hdr(struct sk_b
+ union ixgbe_adv_rx_desc *rx_desc = priv;
+
+ /* Verify that this is a valid IPv4 TCP packet */
+- if (!(rx_desc->wb.lower.lo_dword.pkt_info &
+- (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)))
++ if (!((ixgbe_get_pkt_info(rx_desc) & IXGBE_RXDADV_PKTTYPE_IPV4) &&
++ (ixgbe_get_pkt_info(rx_desc) & IXGBE_RXDADV_PKTTYPE_TCP)))
+ return -1;
+
+ /* Set network headers */
+@@ -1412,8 +1558,12 @@ static int ixgbe_get_skb_hdr(struct sk_b
+ return 0;
+ }
+
++#endif /* CONFIG_IXGBE_LRO */
++#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
++ (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
++
+ /**
+- * ixgbe_configure_rx - Configure 8254x Receive Unit after Reset
++ * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+@@ -1426,25 +1576,26 @@ static void ixgbe_configure_rx(struct ix
+ int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+ int i, j;
+ u32 rdlen, rxctrl, rxcsum;
+- u32 random[10];
++ static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
++ 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
++ 0x6A3E67EA, 0x14364D17, 0x3BED200D};
+ u32 fctrl, hlreg0;
+ u32 pages;
+- u32 reta = 0, mrqc, srrctl;
++ u32 reta = 0, mrqc;
++ u32 rdrxctl;
++ int rx_buf_len;
+
+ /* Decide whether to use packet split mode or not */
+- if (netdev->mtu > ETH_DATA_LEN)
+- adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
+- else
+- adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
++ adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
+
+ /* Set the RX buffer length according to the mode */
+ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+- adapter->rx_buf_len = IXGBE_RX_HDR_SIZE;
++ rx_buf_len = IXGBE_RX_HDR_SIZE;
+ } else {
+ if (netdev->mtu <= ETH_DATA_LEN)
+- adapter->rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
++ rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+ else
+- adapter->rx_buf_len = ALIGN(max_frame, 1024);
++ rx_buf_len = ALIGN(max_frame, 1024);
+ }
+
+ fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
+@@ -1461,28 +1612,6 @@ static void ixgbe_configure_rx(struct ix
+
+ pages = PAGE_USE_COUNT(adapter->netdev->mtu);
+
+- srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(0));
+- srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
+- srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
+-
+- if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+- srrctl |= PAGE_SIZE >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+- srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+- srrctl |= ((IXGBE_RX_HDR_SIZE <<
+- IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
+- IXGBE_SRRCTL_BSIZEHDR_MASK);
+- } else {
+- srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+-
+- if (adapter->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
+- srrctl |=
+- IXGBE_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+- else
+- srrctl |=
+- adapter->rx_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+- }
+- IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(0), srrctl);
+-
+ rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
+ /* disable receives while setting up the descriptors */
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+@@ -1492,25 +1621,45 @@ static void ixgbe_configure_rx(struct ix
+ * the Base and Length of the Rx Descriptor Ring */
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ rdba = adapter->rx_ring[i].dma;
+- IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i), (rdba & DMA_32BIT_MASK));
+- IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
+- IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i), rdlen);
+- IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
+- IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
+- adapter->rx_ring[i].head = IXGBE_RDH(i);
+- adapter->rx_ring[i].tail = IXGBE_RDT(i);
+- }
+-
+- /* Intitial LRO Settings */
+- adapter->rx_ring[i].lro_mgr.max_aggr = IXGBE_MAX_LRO_AGGREGATE;
+- adapter->rx_ring[i].lro_mgr.max_desc = IXGBE_MAX_LRO_DESCRIPTORS;
+- adapter->rx_ring[i].lro_mgr.get_skb_header = ixgbe_get_skb_hdr;
+- adapter->rx_ring[i].lro_mgr.features = LRO_F_EXTRACT_VLAN_ID;
+- if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
+- adapter->rx_ring[i].lro_mgr.features |= LRO_F_NAPI;
+- adapter->rx_ring[i].lro_mgr.dev = adapter->netdev;
+- adapter->rx_ring[i].lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
+- adapter->rx_ring[i].lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
++ j = adapter->rx_ring[i].reg_idx;
++ IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_32BIT_MASK));
++ IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
++ IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), rdlen);
++ IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
++ IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
++ adapter->rx_ring[i].head = IXGBE_RDH(j);
++ adapter->rx_ring[i].tail = IXGBE_RDT(j);
++ adapter->rx_ring[i].rx_buf_len = rx_buf_len;
++#ifdef CONFIG_IXGBE_LRO
++ /* Initial LRO Settings */
++ adapter->rx_ring[i].lro_mgr.max_aggr = IXGBE_MAX_LRO_AGGREGATE;
++ adapter->rx_ring[i].lro_mgr.max_desc = IXGBE_MAX_LRO_DESCRIPTORS;
++ adapter->rx_ring[i].lro_mgr.get_skb_header = ixgbe_get_skb_hdr;
++ adapter->rx_ring[i].lro_mgr.features = LRO_F_EXTRACT_VLAN_ID;
++ if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
++ adapter->rx_ring[i].lro_mgr.features |= LRO_F_NAPI;
++ adapter->rx_ring[i].lro_mgr.dev = adapter->netdev;
++ adapter->rx_ring[i].lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
++ adapter->rx_ring[i].lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
++#endif
++
++ ixgbe_configure_srrctl(adapter, j);
++ }
++
++ /*
++ * For VMDq support of different descriptor types or
++ * buffer sizes through the use of multiple SRRCTL
++ * registers, RDRXCTL.MVMEN must be set to 1
++ *
++ * also, the manual doesn't mention it clearly but DCA hints
++ * will only use queue 0's tags unless this bit is set. Side
++ * effects of setting this bit are only that SRRCTL must be
++ * fully programmed [0..15]
++ */
++ rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
++ rdrxctl |= IXGBE_RDRXCTL_MVMEN;
++ IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
++
+
+ if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
+ /* Fill out redirection table */
+@@ -1525,22 +1674,20 @@ static void ixgbe_configure_rx(struct ix
+ }
+
+ /* Fill out hash function seeds */
+- /* XXX use a random constant here to glue certain flows */
+- get_random_bytes(&random[0], 40);
+ for (i = 0; i < 10; i++)
+- IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random[i]);
++ IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);
+
+ mrqc = IXGBE_MRQC_RSSEN
+ /* Perform hash on these packet types */
+- | IXGBE_MRQC_RSS_FIELD_IPV4
+- | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
+- | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
+- | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
+- | IXGBE_MRQC_RSS_FIELD_IPV6_EX
+- | IXGBE_MRQC_RSS_FIELD_IPV6
+- | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
+- | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
+- | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
++ | IXGBE_MRQC_RSS_FIELD_IPV4
++ | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
++ | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
++ | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
++ | IXGBE_MRQC_RSS_FIELD_IPV6_EX
++ | IXGBE_MRQC_RSS_FIELD_IPV6
++ | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
++ | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
++ | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+ }
+
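Replacing get_random_bytes() with a constant seed resolves the old "XXX use
a random constant here to glue certain flows" note: with a fixed Toeplitz
key, the hash of a given 5-tuple, and hence its queue, is stable across
reboots and identical on every adapter. A sketch of the resulting dispatch
(a 128-entry redirection table is assumed for this MAC):

    /* rx_queue = reta[ toeplitz(seed, 5-tuple) & 0x7F ] & rss_mask;
     * fixed seed  =>  the same flow always lands on the same queue */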
+@@ -1562,7 +1709,7 @@ static void ixgbe_configure_rx(struct ix
+ }
+
+ static void ixgbe_vlan_rx_register(struct net_device *netdev,
+- struct vlan_group *grp)
++ struct vlan_group *grp)
+ {
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ u32 ctrl;
+@@ -1586,14 +1733,16 @@ static void ixgbe_vlan_rx_register(struc
+ static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+ {
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
++ struct ixgbe_hw *hw = &adapter->hw;
+
+ /* add VID to filter table */
+- ixgbe_set_vfta(&adapter->hw, vid, 0, true);
++ hw->mac.ops.set_vfta(&adapter->hw, vid, 0, true);
+ }
+
+ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+ {
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
++ struct ixgbe_hw *hw = &adapter->hw;
+
+ if (!test_bit(__IXGBE_DOWN, &adapter->state))
+ ixgbe_irq_disable(adapter);
+@@ -1604,7 +1753,7 @@ static void ixgbe_vlan_rx_kill_vid(struc
+ ixgbe_irq_enable(adapter);
+
+ /* remove VID from filter table */
+- ixgbe_set_vfta(&adapter->hw, vid, 0, false);
++ hw->mac.ops.set_vfta(&adapter->hw, vid, 0, false);
+ }
+
+ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
+@@ -1621,23 +1770,37 @@ static void ixgbe_restore_vlan(struct ix
+ }
+ }
+
++static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
++{
++ struct dev_mc_list *mc_ptr;
++ u8 *addr = *mc_addr_ptr;
++ *vmdq = 0;
++
++ mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
++ if (mc_ptr->next)
++ *mc_addr_ptr = mc_ptr->next->dmi_addr;
++ else
++ *mc_addr_ptr = NULL;
++
++ return addr;
++}
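The hardware-layer list update now takes an iterator callback, so the shared
MAC code can walk any list representation without knowing about struct
dev_mc_list; ixgbe_addr_list_itr recovers the enclosing entry from its
dmi_addr member via container_of and advances the cursor. Roughly, the
consumer side looks like this (loop body hypothetical):

    u8 *cursor = addr_list;
    u32 vmdq;
    while (addr_count--) {
            u8 *mac = next(hw, &cursor, &vmdq);   /* the callback */
            /* ... program mac into the next RAR/MTA slot ... */
    }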
++
+ /**
+- * ixgbe_set_multi - Multicast and Promiscuous mode set
++ * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+- * The set_multi entry point is called whenever the multicast address
+- * list or the network interface flags are updated. This routine is
+- * responsible for configuring the hardware for proper multicast,
+- * promiscuous mode, and all-multi behavior.
++ * The set_rx_mode entry point is called whenever the unicast/multicast
++ * address list or the network interface flags are updated. This routine is
++ * responsible for configuring the hardware for proper unicast, multicast and
++ * promiscuous mode.
+ **/
+-static void ixgbe_set_multi(struct net_device *netdev)
++static void ixgbe_set_rx_mode(struct net_device *netdev)
+ {
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+- struct dev_mc_list *mc_ptr;
+- u8 *mta_list;
+ u32 fctrl, vlnctrl;
+- int i;
++ u8 *addr_list = NULL;
++ int addr_count = 0;
+
+ /* Check for Promiscuous and All Multicast modes */
+
+@@ -1645,6 +1808,7 @@ static void ixgbe_set_multi(struct net_d
+ vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+
+ if (netdev->flags & IFF_PROMISC) {
++ hw->addr_ctrl.user_set_promisc = 1;
+ fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
+ vlnctrl &= ~IXGBE_VLNCTRL_VFE;
+ } else {
+@@ -1655,33 +1819,25 @@ static void ixgbe_set_multi(struct net_d
+ fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
+ }
+ vlnctrl |= IXGBE_VLNCTRL_VFE;
++ hw->addr_ctrl.user_set_promisc = 0;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+
+- if (netdev->mc_count) {
+- mta_list = kcalloc(netdev->mc_count, ETH_ALEN, GFP_ATOMIC);
+- if (!mta_list)
+- return;
+-
+- /* Shared function expects packed array of only addresses. */
+- mc_ptr = netdev->mc_list;
+-
+- for (i = 0; i < netdev->mc_count; i++) {
+- if (!mc_ptr)
+- break;
+- memcpy(mta_list + (i * ETH_ALEN), mc_ptr->dmi_addr,
+- ETH_ALEN);
+- mc_ptr = mc_ptr->next;
+- }
+-
+- ixgbe_update_mc_addr_list(hw, mta_list, i, 0);
+- kfree(mta_list);
+- } else {
+- ixgbe_update_mc_addr_list(hw, NULL, 0, 0);
+- }
+-
++ /* reprogram secondary unicast list */
++ addr_count = netdev->uc_count;
++ if (addr_count)
++ addr_list = netdev->uc_list->dmi_addr;
++ hw->mac.ops.update_uc_addr_list(hw, addr_list, addr_count,
++ ixgbe_addr_list_itr);
++
++ /* reprogram multicast list */
++ addr_count = netdev->mc_count;
++ if (addr_count)
++ addr_list = netdev->mc_list->dmi_addr;
++ hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
++ ixgbe_addr_list_itr);
+ }
+
+ static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
+@@ -1695,10 +1851,16 @@ static void ixgbe_napi_enable_all(struct
+ q_vectors = 1;
+
+ for (q_idx = 0; q_idx < q_vectors; q_idx++) {
++ struct napi_struct *napi;
+ q_vector = &adapter->q_vector[q_idx];
+ if (!q_vector->rxr_count)
+ continue;
+- napi_enable(&q_vector->napi);
++ napi = &q_vector->napi;
++ if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) &&
++ (q_vector->rxr_count > 1))
++ napi->poll = &ixgbe_clean_rxonly_many;
++
++ napi_enable(napi);
+ }
+ }
+
+@@ -1725,7 +1887,7 @@ static void ixgbe_configure(struct ixgbe
+ struct net_device *netdev = adapter->netdev;
+ int i;
+
+- ixgbe_set_multi(netdev);
++ ixgbe_set_rx_mode(netdev);
+
+ ixgbe_restore_vlan(adapter);
+
+@@ -1733,7 +1895,7 @@ static void ixgbe_configure(struct ixgbe
+ ixgbe_configure_rx(adapter);
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i],
+- (adapter->rx_ring[i].count - 1));
++ (adapter->rx_ring[i].count - 1));
+ }
+
+ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
+@@ -1751,7 +1913,7 @@ static int ixgbe_up_complete(struct ixgb
+ (adapter->flags & IXGBE_FLAG_MSI_ENABLED)) {
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+ gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME |
+- IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD);
++ IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD);
+ } else {
+ /* MSI only */
+ gpie = 0;
+@@ -1778,6 +1940,8 @@ static int ixgbe_up_complete(struct ixgb
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ j = adapter->tx_ring[i].reg_idx;
+ txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
++ /* enable WTHRESH=8 descriptors, to encourage burst writeback */
++ txdctl |= (8 << 16);
+ txdctl |= IXGBE_TXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
+ }
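The bare (8 << 16) lands in TXDCTL's write-back threshold field, so the
device coalesces head write-backs until eight descriptors have completed
instead of posting one PCIe write apiece; that pairs with the head
write-back cleanup added earlier. Field layout as assumed here:

    /* TXDCTL (82598, sketch):
     *   bits  6:0   PTHRESH  prefetch threshold
     *   bits 14:8   HTHRESH  host threshold
     *   bits 22:16  WTHRESH  write-back threshold  <- 8
     *   bit  25     ENABLE                             */
    txdctl |= (8 << 16);        /* WTHRESH = 8 descriptors */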
+@@ -1812,6 +1976,8 @@ static int ixgbe_up_complete(struct ixgb
+
+ /* bring the link up in the watchdog, this could race with our first
+ * link up interrupt but shouldn't be a problem */
++ adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
++ adapter->link_check_timeout = jiffies;
+ mod_timer(&adapter->watchdog_timer, jiffies);
+ return 0;
+ }
+@@ -1836,50 +2002,14 @@ int ixgbe_up(struct ixgbe_adapter *adapt
+
+ void ixgbe_reset(struct ixgbe_adapter *adapter)
+ {
+- if (ixgbe_init_hw(&adapter->hw))
+- DPRINTK(PROBE, ERR, "Hardware Error\n");
++ struct ixgbe_hw *hw = &adapter->hw;
++ if (hw->mac.ops.init_hw(hw))
++ dev_err(&adapter->pdev->dev, "Hardware Error\n");
+
+ /* reprogram the RAR[0] in case user changed it. */
+- ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
+-
+-}
+-
+-#ifdef CONFIG_PM
+-static int ixgbe_resume(struct pci_dev *pdev)
+-{
+- struct net_device *netdev = pci_get_drvdata(pdev);
+- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+- u32 err;
+-
+- pci_set_power_state(pdev, PCI_D0);
+- pci_restore_state(pdev);
+- err = pci_enable_device(pdev);
+- if (err) {
+- printk(KERN_ERR "ixgbe: Cannot enable PCI device from " \
+- "suspend\n");
+- return err;
+- }
+- pci_set_master(pdev);
+-
+- pci_enable_wake(pdev, PCI_D3hot, 0);
+- pci_enable_wake(pdev, PCI_D3cold, 0);
+-
+- if (netif_running(netdev)) {
+- err = ixgbe_request_irq(adapter);
+- if (err)
+- return err;
+- }
+-
+- ixgbe_reset(adapter);
+-
+- if (netif_running(netdev))
+- ixgbe_up(adapter);
+-
+- netif_device_attach(netdev);
++ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+
+- return 0;
+ }
+-#endif
+
+ /**
+ * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
+@@ -1887,7 +2017,7 @@ static int ixgbe_resume(struct pci_dev *
+ * @rx_ring: ring to free buffers from
+ **/
+ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
+- struct ixgbe_ring *rx_ring)
++ struct ixgbe_ring *rx_ring)
+ {
+ struct pci_dev *pdev = adapter->pdev;
+ unsigned long size;
+@@ -1901,8 +2031,8 @@ static void ixgbe_clean_rx_ring(struct i
+ rx_buffer_info = &rx_ring->rx_buffer_info[i];
+ if (rx_buffer_info->dma) {
+ pci_unmap_single(pdev, rx_buffer_info->dma,
+- adapter->rx_buf_len,
+- PCI_DMA_FROMDEVICE);
++ rx_ring->rx_buf_len,
++ PCI_DMA_FROMDEVICE);
+ rx_buffer_info->dma = 0;
+ }
+ if (rx_buffer_info->skb) {
+@@ -1911,12 +2041,12 @@ static void ixgbe_clean_rx_ring(struct i
+ }
+ if (!rx_buffer_info->page)
+ continue;
+- pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE,
+- PCI_DMA_FROMDEVICE);
++ pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2,
++ PCI_DMA_FROMDEVICE);
+ rx_buffer_info->page_dma = 0;
+-
+ put_page(rx_buffer_info->page);
+ rx_buffer_info->page = NULL;
++ rx_buffer_info->page_offset = 0;
+ }
+
+ size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
+@@ -1938,7 +2068,7 @@ static void ixgbe_clean_rx_ring(struct i
+ * @tx_ring: ring to be cleaned
+ **/
+ static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
+- struct ixgbe_ring *tx_ring)
++ struct ixgbe_ring *tx_ring)
+ {
+ struct ixgbe_tx_buffer *tx_buffer_info;
+ unsigned long size;
+@@ -1991,96 +2121,85 @@ static void ixgbe_clean_all_tx_rings(str
+ void ixgbe_down(struct ixgbe_adapter *adapter)
+ {
+ struct net_device *netdev = adapter->netdev;
++ struct ixgbe_hw *hw = &adapter->hw;
+ u32 rxctrl;
++ u32 txdctl;
++ int i, j;
+
+ /* signal that we are down to the interrupt handler */
+ set_bit(__IXGBE_DOWN, &adapter->state);
+
+ /* disable receives */
+- rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
+- IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL,
+- rxctrl & ~IXGBE_RXCTRL_RXEN);
++ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
++ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
+
+ netif_tx_disable(netdev);
+
+- /* disable transmits in the hardware */
+-
+- /* flush both disables */
+- IXGBE_WRITE_FLUSH(&adapter->hw);
++ IXGBE_WRITE_FLUSH(hw);
+ msleep(10);
+
++ netif_tx_stop_all_queues(netdev);
++
+ ixgbe_irq_disable(adapter);
+
+ ixgbe_napi_disable_all(adapter);
++
+ del_timer_sync(&adapter->watchdog_timer);
++ cancel_work_sync(&adapter->watchdog_task);
++
++ /* disable transmits in the hardware now that interrupts are off */
++ for (i = 0; i < adapter->num_tx_queues; i++) {
++ j = adapter->tx_ring[i].reg_idx;
++ txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
++ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
++ (txdctl & ~IXGBE_TXDCTL_ENABLE));
++ }
+
+ netif_carrier_off(netdev);
+- netif_tx_stop_all_queues(netdev);
+
++#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
++ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
++ adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
++ dca_remove_requester(&adapter->pdev->dev);
++ }
++
++#endif
+ if (!pci_channel_offline(adapter->pdev))
+ ixgbe_reset(adapter);
+ ixgbe_clean_all_tx_rings(adapter);
+ ixgbe_clean_all_rx_rings(adapter);
+
++#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
++ /* since we reset the hardware DCA settings were cleared */
++ if (dca_add_requester(&adapter->pdev->dev) == 0) {
++ adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
++ /* always use CB2 mode, difference is masked
++ * in the CB driver */
++ IXGBE_WRITE_REG(hw, IXGBE_DCA_CTRL, 2);
++ ixgbe_setup_dca(adapter);
++ }
++#endif
+ }
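ixgbe_down() now brackets the reset with DCA teardown and re-registration:
per its own comment, the reset clears the DCA settings, so the requester is
removed first and, once the rings are clean, re-added through the same
CB2-mode write the notifier path uses. The pattern, condensed:

    dca_remove_requester(&pdev->dev);       /* before reset           */
    ixgbe_reset(adapter);                   /* wipes DCA_CTRL et al.  */
    if (dca_add_requester(&pdev->dev) == 0) {
            IXGBE_WRITE_REG(hw, IXGBE_DCA_CTRL, 2);   /* CB2 mode    */
            ixgbe_setup_dca(adapter);
    }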
+
+-static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
++/**
++ * ixgbe_poll - NAPI Rx polling callback
++ * @napi: structure for representing this polling device
++ * @budget: how many packets driver is allowed to clean
++ *
++ * This function is used for legacy and MSI, NAPI mode
++ **/
++static int ixgbe_poll(struct napi_struct *napi, int budget)
+ {
+- struct net_device *netdev = pci_get_drvdata(pdev);
+- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+-#ifdef CONFIG_PM
+- int retval = 0;
+-#endif
+-
+- netif_device_detach(netdev);
+-
+- if (netif_running(netdev)) {
+- ixgbe_down(adapter);
+- ixgbe_free_irq(adapter);
+- }
+-
+-#ifdef CONFIG_PM
+- retval = pci_save_state(pdev);
+- if (retval)
+- return retval;
+-#endif
+-
+- pci_enable_wake(pdev, PCI_D3hot, 0);
+- pci_enable_wake(pdev, PCI_D3cold, 0);
+-
+- ixgbe_release_hw_control(adapter);
+-
+- pci_disable_device(pdev);
+-
+- pci_set_power_state(pdev, pci_choose_state(pdev, state));
+-
+- return 0;
+-}
+-
+-static void ixgbe_shutdown(struct pci_dev *pdev)
+-{
+- ixgbe_suspend(pdev, PMSG_SUSPEND);
+-}
+-
+-/**
+- * ixgbe_poll - NAPI Rx polling callback
+- * @napi: structure for representing this polling device
+- * @budget: how many packets driver is allowed to clean
+- *
+- * This function is used for legacy and MSI, NAPI mode
+- **/
+-static int ixgbe_poll(struct napi_struct *napi, int budget)
+-{
+- struct ixgbe_q_vector *q_vector = container_of(napi,
+- struct ixgbe_q_vector, napi);
+- struct ixgbe_adapter *adapter = q_vector->adapter;
+- int tx_cleaned = 0, work_done = 0;
+-
+-#ifdef CONFIG_DCA
+- if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
+- ixgbe_update_tx_dca(adapter, adapter->tx_ring);
+- ixgbe_update_rx_dca(adapter, adapter->rx_ring);
+- }
++ struct ixgbe_q_vector *q_vector = container_of(napi,
++ struct ixgbe_q_vector, napi);
++ struct ixgbe_adapter *adapter = q_vector->adapter;
++ int tx_cleaned, work_done = 0;
++
++#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
++ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
++ ixgbe_update_tx_dca(adapter, adapter->tx_ring);
++ ixgbe_update_rx_dca(adapter, adapter->rx_ring);
++ }
+ #endif
+
+ tx_cleaned = ixgbe_clean_tx_irq(adapter, adapter->tx_ring);
+@@ -2092,12 +2211,11 @@ static int ixgbe_poll(struct napi_struct
+ /* If budget not fully consumed, exit the polling mode */
+ if (work_done < budget) {
+ netif_rx_complete(adapter->netdev, napi);
+- if (adapter->rx_eitr < IXGBE_MIN_ITR_USECS)
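++ /* the low bits of itr_setting select the dynamic ITR modes */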
++ if (adapter->itr_setting & 3)
+ ixgbe_set_itr(adapter);
+ if (!test_bit(__IXGBE_DOWN, &adapter->state))
+ ixgbe_irq_enable(adapter);
+ }
+-
+ return work_done;
+ }
+
+@@ -2123,8 +2241,48 @@ static void ixgbe_reset_task(struct work
+ ixgbe_reinit_locked(adapter);
+ }
+
++static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
++{
++ int nrq = 1, ntq = 1;
++ int feature_mask = 0, rss_i, rss_m;
++
++ /* Number of supported queues */
++ switch (adapter->hw.mac.type) {
++ case ixgbe_mac_82598EB:
++ rss_i = adapter->ring_feature[RING_F_RSS].indices;
++ rss_m = 0;
++ feature_mask |= IXGBE_FLAG_RSS_ENABLED;
++
++ switch (adapter->flags & feature_mask) {
++ case (IXGBE_FLAG_RSS_ENABLED):
++ rss_m = 0xF;
++ nrq = rss_i;
++ ntq = rss_i;
++ break;
++ case 0:
++ default:
++ rss_i = 0;
++ rss_m = 0;
++ nrq = 1;
++ ntq = 1;
++ break;
++ }
++
++ adapter->ring_feature[RING_F_RSS].indices = rss_i;
++ adapter->ring_feature[RING_F_RSS].mask = rss_m;
++ break;
++ default:
++ nrq = 1;
++ ntq = 1;
++ break;
++ }
++
++ adapter->num_rx_queues = nrq;
++ adapter->num_tx_queues = ntq;
++}
++
+ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
+- int vectors)
++ int vectors)
+ {
+ int err, vector_threshold;
+
+@@ -2143,7 +2301,7 @@ static void ixgbe_acquire_msix_vectors(s
+ */
+ while (vectors >= vector_threshold) {
+ err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
+- vectors);
++ vectors);
+ if (!err) /* Success in acquiring all requested vectors. */
+ break;
+ else if (err < 0)
+@@ -2162,54 +2320,13 @@ static void ixgbe_acquire_msix_vectors(s
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+ adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
+- adapter->num_tx_queues = 1;
+- adapter->num_rx_queues = 1;
++ ixgbe_set_num_queues(adapter);
+ } else {
+ adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
+ adapter->num_msix_vectors = vectors;
+ }
+ }
+
+-static void __devinit ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
+-{
+- int nrq, ntq;
+- int feature_mask = 0, rss_i, rss_m;
+-
+- /* Number of supported queues */
+- switch (adapter->hw.mac.type) {
+- case ixgbe_mac_82598EB:
+- rss_i = adapter->ring_feature[RING_F_RSS].indices;
+- rss_m = 0;
+- feature_mask |= IXGBE_FLAG_RSS_ENABLED;
+-
+- switch (adapter->flags & feature_mask) {
+- case (IXGBE_FLAG_RSS_ENABLED):
+- rss_m = 0xF;
+- nrq = rss_i;
+- ntq = rss_i;
+- break;
+- case 0:
+- default:
+- rss_i = 0;
+- rss_m = 0;
+- nrq = 1;
+- ntq = 1;
+- break;
+- }
+-
+- adapter->ring_feature[RING_F_RSS].indices = rss_i;
+- adapter->ring_feature[RING_F_RSS].mask = rss_m;
+- break;
+- default:
+- nrq = 1;
+- ntq = 1;
+- break;
+- }
+-
+- adapter->num_rx_queues = nrq;
+- adapter->num_tx_queues = ntq;
+-}
+-
+ /**
+ * ixgbe_cache_ring_register - Descriptor ring to register mapping
+ * @adapter: board private structure to initialize
+@@ -2219,9 +2336,6 @@ static void __devinit ixgbe_set_num_queu
+ **/
+ static void __devinit ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
+ {
+- /* TODO: Remove all uses of the indices in the cases where multiple
+- * features are OR'd together, if the feature set makes sense.
+- */
+ int feature_mask = 0, rss_i;
+ int i, txr_idx, rxr_idx;
+
+@@ -2262,21 +2376,22 @@ static int __devinit ixgbe_alloc_queues(
+ int i;
+
+ adapter->tx_ring = kcalloc(adapter->num_tx_queues,
+- sizeof(struct ixgbe_ring), GFP_KERNEL);
++ sizeof(struct ixgbe_ring), GFP_KERNEL);
+ if (!adapter->tx_ring)
+ goto err_tx_ring_allocation;
+
+ adapter->rx_ring = kcalloc(adapter->num_rx_queues,
+- sizeof(struct ixgbe_ring), GFP_KERNEL);
++ sizeof(struct ixgbe_ring), GFP_KERNEL);
+ if (!adapter->rx_ring)
+ goto err_rx_ring_allocation;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+- adapter->tx_ring[i].count = IXGBE_DEFAULT_TXD;
++ adapter->tx_ring[i].count = adapter->tx_ring_count;
+ adapter->tx_ring[i].queue_index = i;
+ }
++
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+- adapter->rx_ring[i].count = IXGBE_DEFAULT_RXD;
++ adapter->rx_ring[i].count = adapter->rx_ring_count;
+ adapter->rx_ring[i].queue_index = i;
+ }
+
+@@ -2298,25 +2413,19 @@ err_tx_ring_allocation:
+ * capabilities of the hardware and the kernel.
+ **/
+ static int __devinit ixgbe_set_interrupt_capability(struct ixgbe_adapter
+- *adapter)
++ *adapter)
+ {
+ int err = 0;
+ int vector, v_budget;
+
+ /*
+- * Set the default interrupt throttle rate.
+- */
+- adapter->rx_eitr = (1000000 / IXGBE_DEFAULT_ITR_RX_USECS);
+- adapter->tx_eitr = (1000000 / IXGBE_DEFAULT_ITR_TX_USECS);
+-
+- /*
+ * It's easy to be greedy for MSI-X vectors, but it really
+ * doesn't do us much good if we have a lot more vectors
+ * than CPU's. So let's be conservative and only ask for
+ * (roughly) twice the number of vectors as there are CPU's.
+ */
+ v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
+- (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;
++ (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;
+
+ /*
+ * At the same time, hardware can only support a maximum of
+@@ -2330,7 +2439,7 @@ static int __devinit ixgbe_set_interrupt
+ /* A failure in MSI-X entry allocation isn't fatal, but it does
+ * mean we disable MSI-X capabilities of the adapter. */
+ adapter->msix_entries = kcalloc(v_budget,
+- sizeof(struct msix_entry), GFP_KERNEL);
++ sizeof(struct msix_entry), GFP_KERNEL);
+ if (!adapter->msix_entries) {
+ adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
+ ixgbe_set_num_queues(adapter);
+@@ -2339,7 +2448,7 @@ static int __devinit ixgbe_set_interrupt
+ err = ixgbe_alloc_queues(adapter);
+ if (err) {
+ DPRINTK(PROBE, ERR, "Unable to allocate memory "
+- "for queues\n");
++ "for queues\n");
+ goto out;
+ }
+
+@@ -2360,7 +2469,7 @@ try_msi:
+ adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
+ } else {
+ DPRINTK(HW, DEBUG, "Unable to allocate MSI interrupt, "
+- "falling back to legacy. Error: %d\n", err);
++ "falling back to legacy. Error: %d\n", err);
+ /* reset err */
+ err = 0;
+ }
+@@ -2416,9 +2525,9 @@ static int __devinit ixgbe_init_interrup
+ }
+
+ DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, "
+- "Tx Queue count = %u\n",
+- (adapter->num_rx_queues > 1) ? "Enabled" :
+- "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
++ "Tx Queue count = %u\n",
++ (adapter->num_rx_queues > 1) ? "Enabled" :
++ "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
+
+ set_bit(__IXGBE_DOWN, &adapter->state);
+
+@@ -2445,33 +2554,44 @@ static int __devinit ixgbe_sw_init(struc
+ struct pci_dev *pdev = adapter->pdev;
+ unsigned int rss;
+
++ /* PCI config space info */
++
++ hw->vendor_id = pdev->vendor;
++ hw->device_id = pdev->device;
++ hw->revision_id = pdev->revision;
++ hw->subsystem_vendor_id = pdev->subsystem_vendor;
++ hw->subsystem_device_id = pdev->subsystem_device;
++
+ /* Set capability flags */
+ rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
+ adapter->ring_feature[RING_F_RSS].indices = rss;
+ adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
+
+- /* Enable Dynamic interrupt throttling by default */
+- adapter->rx_eitr = 1;
+- adapter->tx_eitr = 1;
+-
+ /* default flow control settings */
+- hw->fc.original_type = ixgbe_fc_full;
+- hw->fc.type = ixgbe_fc_full;
++ hw->fc.original_type = ixgbe_fc_none;
++ hw->fc.type = ixgbe_fc_none;
++ hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
++ hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
++ hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
++ hw->fc.send_xon = true;
+
+ /* select 10G link by default */
+ hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN;
+- if (hw->mac.ops.reset(hw)) {
+- dev_err(&pdev->dev, "HW Init failed\n");
+- return -EIO;
+- }
+- if (hw->mac.ops.setup_link_speed(hw, IXGBE_LINK_SPEED_10GB_FULL, true,
+- false)) {
+- dev_err(&pdev->dev, "Link Speed setup failed\n");
+- return -EIO;
+- }
++
++ /* enable itr by default in dynamic mode */
++ adapter->itr_setting = 1;
++ adapter->eitr_param = 20000;
++
++ /* set defaults for eitr in MegaBytes */
++ adapter->eitr_low = 10;
++ adapter->eitr_high = 20;
++
++ /* set default ring sizes */
++ adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
++ adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
+
+ /* initialize eeprom parameters */
+- if (ixgbe_init_eeprom(hw)) {
++ if (ixgbe_init_eeprom_params_generic(hw)) {
+ dev_err(&pdev->dev, "EEPROM initialization failed\n");
+ return -EIO;
+ }
+@@ -2487,105 +2607,160 @@ static int __devinit ixgbe_sw_init(struc
+ /**
+ * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
+ * @adapter: board private structure
+- * @txdr: tx descriptor ring (for a specific queue) to setup
++ * @tx_ring: tx descriptor ring (for a specific queue) to setup
+ *
+ * Return 0 on success, negative on failure
+ **/
+ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
+- struct ixgbe_ring *txdr)
++ struct ixgbe_ring *tx_ring)
+ {
+ struct pci_dev *pdev = adapter->pdev;
+ int size;
+
+- size = sizeof(struct ixgbe_tx_buffer) * txdr->count;
+- txdr->tx_buffer_info = vmalloc(size);
+- if (!txdr->tx_buffer_info) {
+- DPRINTK(PROBE, ERR,
+- "Unable to allocate memory for the transmit descriptor ring\n");
+- return -ENOMEM;
+- }
+- memset(txdr->tx_buffer_info, 0, size);
++ size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
++ tx_ring->tx_buffer_info = vmalloc(size);
++ if (!tx_ring->tx_buffer_info)
++ goto err;
++ memset(tx_ring->tx_buffer_info, 0, size);
+
+ /* round up to nearest 4K */
+- txdr->size = txdr->count * sizeof(union ixgbe_adv_tx_desc);
+- txdr->size = ALIGN(txdr->size, 4096);
++ tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc) +
++ sizeof(u32);
++ tx_ring->size = ALIGN(tx_ring->size, 4096);
++
++ tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
++ &tx_ring->dma);
++ if (!tx_ring->desc)
++ goto err;
+
+- txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
+- if (!txdr->desc) {
+- vfree(txdr->tx_buffer_info);
+- DPRINTK(PROBE, ERR,
+- "Memory allocation failed for the tx desc ring\n");
+- return -ENOMEM;
+- }
++ tx_ring->next_to_use = 0;
++ tx_ring->next_to_clean = 0;
++ tx_ring->work_limit = tx_ring->count;
++ return 0;
+
+- txdr->next_to_use = 0;
+- txdr->next_to_clean = 0;
+- txdr->work_limit = txdr->count;
++err:
++ vfree(tx_ring->tx_buffer_info);
++ tx_ring->tx_buffer_info = NULL;
++ DPRINTK(PROBE, ERR, "Unable to allocate memory for the transmit "
++ "descriptor ring\n");
++ return -ENOMEM;
++}
+
+- return 0;
++/**
++ * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
++ * @adapter: board private structure
++ *
++ * If this function returns with an error, then it's possible one or
++ * more of the rings are populated (while the rest are not). It is the
++ * caller's duty to clean those orphaned rings.
++ *
++ * Return 0 on success, negative on failure
++ **/
++static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
++{
++ int i, err = 0;
++
++ for (i = 0; i < adapter->num_tx_queues; i++) {
++ err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
++ if (!err)
++ continue;
++ DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i);
++ break;
++ }
++
++ return err;
+ }
+
+ /**
+ * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @adapter: board private structure
+- * @rxdr: rx descriptor ring (for a specific queue) to setup
++ * @rx_ring: rx descriptor ring (for a specific queue) to setup
+ *
+ * Returns 0 on success, negative on failure
+ **/
+ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
+- struct ixgbe_ring *rxdr)
++ struct ixgbe_ring *rx_ring)
+ {
+ struct pci_dev *pdev = adapter->pdev;
+ int size;
+
++#ifdef CONFIG_IXGBE_LRO
+ size = sizeof(struct net_lro_desc) * IXGBE_MAX_LRO_DESCRIPTORS;
+- rxdr->lro_mgr.lro_arr = vmalloc(size);
+- if (!rxdr->lro_mgr.lro_arr)
++ rx_ring->lro_mgr.lro_arr = vmalloc(size);
++ if (!rx_ring->lro_mgr.lro_arr)
+ return -ENOMEM;
+- memset(rxdr->lro_mgr.lro_arr, 0, size);
+-
+- size = sizeof(struct ixgbe_rx_buffer) * rxdr->count;
+- rxdr->rx_buffer_info = vmalloc(size);
+- if (!rxdr->rx_buffer_info) {
++ memset(rx_ring->lro_mgr.lro_arr, 0, size);
++#endif
++ size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
++ rx_ring->rx_buffer_info = vmalloc(size);
++ if (!rx_ring->rx_buffer_info) {
+ DPRINTK(PROBE, ERR,
+- "vmalloc allocation failed for the rx desc ring\n");
++ "vmalloc allocation failed for the rx desc ring\n");
+ goto alloc_failed;
+ }
+- memset(rxdr->rx_buffer_info, 0, size);
++ memset(rx_ring->rx_buffer_info, 0, size);
+
+ /* Round up to nearest 4K */
+- rxdr->size = rxdr->count * sizeof(union ixgbe_adv_rx_desc);
+- rxdr->size = ALIGN(rxdr->size, 4096);
++ rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
++ rx_ring->size = ALIGN(rx_ring->size, 4096);
+
+- rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
++ rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, &rx_ring->dma);
+
+- if (!rxdr->desc) {
++ if (!rx_ring->desc) {
+ DPRINTK(PROBE, ERR,
+- "Memory allocation failed for the rx desc ring\n");
+- vfree(rxdr->rx_buffer_info);
++ "Memory allocation failed for the rx desc ring\n");
++ vfree(rx_ring->rx_buffer_info);
+ goto alloc_failed;
+ }
+
+- rxdr->next_to_clean = 0;
+- rxdr->next_to_use = 0;
++ rx_ring->next_to_clean = 0;
++ rx_ring->next_to_use = 0;
+
+ return 0;
+
+ alloc_failed:
+- vfree(rxdr->lro_mgr.lro_arr);
+- rxdr->lro_mgr.lro_arr = NULL;
++#ifdef CONFIG_IXGBE_LRO
++ vfree(rx_ring->lro_mgr.lro_arr);
++ rx_ring->lro_mgr.lro_arr = NULL;
++#endif
+ return -ENOMEM;
+ }
+
+ /**
++ * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
++ * @adapter: board private structure
++ *
++ * If this function returns with an error, then it's possible one or
++ * more of the rings are populated (while the rest are not). It is the
++ * caller's duty to clean those orphaned rings.
++ *
++ * Return 0 on success, negative on failure
++ **/
++static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
++{
++ int i, err = 0;
++
++ for (i = 0; i < adapter->num_rx_queues; i++) {
++ err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
++ if (!err)
++ continue;
++ DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i);
++ break;
++ }
++
++ return err;
++}
++
++/**
+ * ixgbe_free_tx_resources - Free Tx Resources per Queue
+ * @adapter: board private structure
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ **/
+-static void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
+- struct ixgbe_ring *tx_ring)
++void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
++ struct ixgbe_ring *tx_ring)
+ {
+ struct pci_dev *pdev = adapter->pdev;
+
+@@ -2620,13 +2795,15 @@ static void ixgbe_free_all_tx_resources(
+ *
+ * Free all receive software resources
+ **/
+-static void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
+- struct ixgbe_ring *rx_ring)
++void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
++ struct ixgbe_ring *rx_ring)
+ {
+ struct pci_dev *pdev = adapter->pdev;
+
++#ifdef CONFIG_IXGBE_LRO
+ vfree(rx_ring->lro_mgr.lro_arr);
+ rx_ring->lro_mgr.lro_arr = NULL;
++#endif
+
+ ixgbe_clean_rx_ring(adapter, rx_ring);
+
+@@ -2653,59 +2830,6 @@ static void ixgbe_free_all_rx_resources(
+ }
+
+ /**
+- * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
+- * @adapter: board private structure
+- *
+- * If this function returns with an error, then it's possible one or
+- * more of the rings is populated (while the rest are not). It is the
+- * callers duty to clean those orphaned rings.
+- *
+- * Return 0 on success, negative on failure
+- **/
+-static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
+-{
+- int i, err = 0;
+-
+- for (i = 0; i < adapter->num_tx_queues; i++) {
+- err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+- if (err) {
+- DPRINTK(PROBE, ERR,
+- "Allocation for Tx Queue %u failed\n", i);
+- break;
+- }
+- }
+-
+- return err;
+-}
+-
+-/**
+- * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
+- * @adapter: board private structure
+- *
+- * If this function returns with an error, then it's possible one or
+- * more of the rings is populated (while the rest are not). It is the
+- * callers duty to clean those orphaned rings.
+- *
+- * Return 0 on success, negative on failure
+- **/
+-
+-static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
+-{
+- int i, err = 0;
+-
+- for (i = 0; i < adapter->num_rx_queues; i++) {
+- err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+- if (err) {
+- DPRINTK(PROBE, ERR,
+- "Allocation for Rx Queue %u failed\n", i);
+- break;
+- }
+- }
+-
+- return err;
+-}
+-
+-/**
+ * ixgbe_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+@@ -2717,12 +2841,12 @@ static int ixgbe_change_mtu(struct net_d
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+
+- if ((max_frame < (ETH_ZLEN + ETH_FCS_LEN)) ||
+- (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
++ /* MTU < 68 is an error and causes problems on some kernels */
++ if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
+ return -EINVAL;
+
+ DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n",
+- netdev->mtu, new_mtu);
++ netdev->mtu, new_mtu);
+ /* must set new MTU before calling down or up */
+ netdev->mtu = new_mtu;
+
+@@ -2817,6 +2941,135 @@ static int ixgbe_close(struct net_device
+ }
+
+ /**
++ * ixgbe_napi_add_all - prep napi structs for use
++ * @adapter: private struct
++ * helper function to napi_add each possible q_vector->napi
++ */
++static void ixgbe_napi_add_all(struct ixgbe_adapter *adapter)
++{
++ int q_idx, q_vectors;
++ int (*poll)(struct napi_struct *, int);
++
++ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
++ poll = &ixgbe_clean_rxonly;
++ /* Only enable as many vectors as we have rx queues. */
++ q_vectors = adapter->num_rx_queues;
++ } else {
++ poll = &ixgbe_poll;
++ /* only one q_vector for legacy modes */
++ q_vectors = 1;
++ }
++
++ for (q_idx = 0; q_idx < q_vectors; q_idx++) {
++ struct ixgbe_q_vector *q_vector = &adapter->q_vector[q_idx];
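++ /* register each vector's NAPI context with the usual weight of 64 */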
++ netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64);
++ }
++}
++
++static void ixgbe_napi_del_all(struct ixgbe_adapter *adapter)
++{
++ int q_idx;
++ int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
++
++ /* legacy and MSI only use one vector */
++ if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
++ q_vectors = 1;
++
++ for (q_idx = 0; q_idx < q_vectors; q_idx++) {
++ struct ixgbe_q_vector *q_vector = &adapter->q_vector[q_idx];
++ if (!q_vector->rxr_count)
++ continue;
++ netif_napi_del(&q_vector->napi);
++ }
++}
++
++#ifdef CONFIG_PM
++static int ixgbe_resume(struct pci_dev *pdev)
++{
++ struct net_device *netdev = pci_get_drvdata(pdev);
++ struct ixgbe_adapter *adapter = netdev_priv(netdev);
++ int err;
++
++ pci_set_power_state(pdev, PCI_D0);
++ pci_restore_state(pdev);
++ err = pci_enable_device(pdev);
++ if (err) {
++ printk(KERN_ERR "ixgbe: Cannot enable PCI device from "
++ "suspend\n");
++ return err;
++ }
++ pci_set_master(pdev);
++
++ pci_enable_wake(pdev, PCI_D3hot, 0);
++ pci_enable_wake(pdev, PCI_D3cold, 0);
++
++ err = ixgbe_init_interrupt_scheme(adapter);
++ if (err) {
++ printk(KERN_ERR "ixgbe: Cannot initialize interrupts for "
++ "device\n");
++ return err;
++ }
++
++ ixgbe_napi_add_all(adapter);
++ ixgbe_reset(adapter);
++
++ if (netif_running(netdev)) {
++ err = ixgbe_open(adapter->netdev);
++ if (err)
++ return err;
++ }
++
++ netif_device_attach(netdev);
++
++ return 0;
++}
++
++#endif /* CONFIG_PM */
++static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
++{
++ struct net_device *netdev = pci_get_drvdata(pdev);
++ struct ixgbe_adapter *adapter = netdev_priv(netdev);
++#ifdef CONFIG_PM
++ int retval = 0;
++#endif
++
++ netif_device_detach(netdev);
++
++ if (netif_running(netdev)) {
++ ixgbe_down(adapter);
++ ixgbe_free_irq(adapter);
++ ixgbe_free_all_tx_resources(adapter);
++ ixgbe_free_all_rx_resources(adapter);
++ }
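++ /* release vectors and rings; ixgbe_resume rebuilds them */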
++ ixgbe_reset_interrupt_capability(adapter);
++ ixgbe_napi_del_all(adapter);
++ kfree(adapter->tx_ring);
++ kfree(adapter->rx_ring);
++
++#ifdef CONFIG_PM
++ retval = pci_save_state(pdev);
++ if (retval)
++ return retval;
++#endif
++
++ pci_enable_wake(pdev, PCI_D3hot, 0);
++ pci_enable_wake(pdev, PCI_D3cold, 0);
++
++ ixgbe_release_hw_control(adapter);
++
++ pci_disable_device(pdev);
++
++ pci_set_power_state(pdev, pci_choose_state(pdev, state));
++
++ return 0;
++}
++
++static void ixgbe_shutdown(struct pci_dev *pdev)
++{
++ ixgbe_suspend(pdev, PMSG_SUSPEND);
++}
++
++/**
+ * ixgbe_update_stats - Update the board statistics counters.
+ * @adapter: board private structure
+ **/
+@@ -2889,7 +3142,7 @@ void ixgbe_update_stats(struct ixgbe_ada
+
+ /* Rx Errors */
+ adapter->net_stats.rx_errors = adapter->stats.crcerrs +
+- adapter->stats.rlec;
++ adapter->stats.rlec;
+ adapter->net_stats.rx_dropped = 0;
+ adapter->net_stats.rx_length_errors = adapter->stats.rlec;
+ adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
+@@ -2903,27 +3156,74 @@ void ixgbe_update_stats(struct ixgbe_ada
+ static void ixgbe_watchdog(unsigned long data)
+ {
+ struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
++ struct ixgbe_hw *hw = &adapter->hw;
++
++ /* Do the watchdog outside of interrupt context due to the lovely
++ * delays that some of the newer hardware requires */
++ if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
++ /* Cause software interrupt to ensure rx rings are cleaned */
++ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
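++ /* set one EICS bit for each queue vector's interrupt */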
++ u32 eics =
++ (1 << (adapter->num_msix_vectors - NON_Q_VECTORS)) - 1;
++ IXGBE_WRITE_REG(hw, IXGBE_EICS, eics);
++ } else {
++ /* For legacy and MSI interrupts don't set any bits that
++ * are enabled for EIAM, because this operation would
++ * set *both* EIMS and EICS for any bit in EIAM */
++ IXGBE_WRITE_REG(hw, IXGBE_EICS,
++ (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
++ }
++ /* Reset the timer */
++ mod_timer(&adapter->watchdog_timer,
++ round_jiffies(jiffies + 2 * HZ));
++ }
++
++ schedule_work(&adapter->watchdog_task);
++}
++
++/**
++ * ixgbe_watchdog_task - worker thread to bring link up
++ * @work: pointer to work_struct containing our data
++ **/
++static void ixgbe_watchdog_task(struct work_struct *work)
++{
++ struct ixgbe_adapter *adapter = container_of(work,
++ struct ixgbe_adapter,
++ watchdog_task);
+ struct net_device *netdev = adapter->netdev;
+- bool link_up;
+- u32 link_speed = 0;
++ struct ixgbe_hw *hw = &adapter->hw;
++ u32 link_speed = adapter->link_speed;
++ bool link_up = adapter->link_up;
+
+- adapter->hw.mac.ops.check_link(&adapter->hw, &(link_speed), &link_up);
++ adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
++
++ if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
++ hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
++ if (link_up ||
++ time_after(jiffies, (adapter->link_check_timeout +
++ IXGBE_TRY_LINK_TIMEOUT))) {
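++ /* link resolved (or timed out): re-arm the LSC interrupt */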
++ IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
++ adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
++ }
++ adapter->link_up = link_up;
++ adapter->link_speed = link_speed;
++ }
+
+ if (link_up) {
+ if (!netif_carrier_ok(netdev)) {
+- u32 frctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
+- u32 rmcs = IXGBE_READ_REG(&adapter->hw, IXGBE_RMCS);
++ u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
++ u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
+ #define FLOW_RX (frctl & IXGBE_FCTRL_RFCE)
+ #define FLOW_TX (rmcs & IXGBE_RMCS_TFCE_802_3X)
+ DPRINTK(LINK, INFO, "NIC Link is Up %s, "
+- "Flow Control: %s\n",
+- (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
+- "10 Gbps" :
+- (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
+- "1 Gbps" : "unknown speed")),
+- ((FLOW_RX && FLOW_TX) ? "RX/TX" :
+- (FLOW_RX ? "RX" :
+- (FLOW_TX ? "TX" : "None"))));
++ "Flow Control: %s\n",
++ (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
++ "10 Gbps" :
++ (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
++ "1 Gbps" : "unknown speed")),
++ ((FLOW_RX && FLOW_TX) ? "RX/TX" :
++ (FLOW_RX ? "RX" :
++ (FLOW_TX ? "TX" : "None"))));
+
+ netif_carrier_on(netdev);
+ netif_tx_wake_all_queues(netdev);
+@@ -2932,6 +3232,8 @@ static void ixgbe_watchdog(unsigned long
+ adapter->detect_tx_hung = true;
+ }
+ } else {
++ adapter->link_up = false;
++ adapter->link_speed = 0;
+ if (netif_carrier_ok(netdev)) {
+ DPRINTK(LINK, INFO, "NIC Link is Down\n");
+ netif_carrier_off(netdev);
+@@ -2940,36 +3242,19 @@ static void ixgbe_watchdog(unsigned long
+ }
+
+ ixgbe_update_stats(adapter);
+-
+- if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
+- /* Cause software interrupt to ensure rx rings are cleaned */
+- if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+- u32 eics =
+- (1 << (adapter->num_msix_vectors - NON_Q_VECTORS)) - 1;
+- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, eics);
+- } else {
+- /* for legacy and MSI interrupts don't set any bits that
+- * are enabled for EIAM, because this operation would
+- * set *both* EIMS and EICS for any bit in EIAM */
+- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
+- (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
+- }
+- /* Reset the timer */
+- mod_timer(&adapter->watchdog_timer,
+- round_jiffies(jiffies + 2 * HZ));
+- }
++ adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
+ }
+
+ static int ixgbe_tso(struct ixgbe_adapter *adapter,
+- struct ixgbe_ring *tx_ring, struct sk_buff *skb,
+- u32 tx_flags, u8 *hdr_len)
++ struct ixgbe_ring *tx_ring, struct sk_buff *skb,
++ u32 tx_flags, u8 *hdr_len)
+ {
+ struct ixgbe_adv_tx_context_desc *context_desc;
+ unsigned int i;
+ int err;
+ struct ixgbe_tx_buffer *tx_buffer_info;
+- u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
+- u32 mss_l4len_idx = 0, l4len;
++ u32 vlan_macip_lens = 0, type_tucmd_mlhl;
++ u32 mss_l4len_idx, l4len;
+
+ if (skb_is_gso(skb)) {
+ if (skb_header_cloned(skb)) {
+@@ -2985,16 +3270,16 @@ static int ixgbe_tso(struct ixgbe_adapte
+ iph->tot_len = 0;
+ iph->check = 0;
+ tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+- iph->daddr, 0,
+- IPPROTO_TCP,
+- 0);
++ iph->daddr, 0,
++ IPPROTO_TCP,
++ 0);
+ adapter->hw_tso_ctxt++;
+ } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
+ ipv6_hdr(skb)->payload_len = 0;
+ tcp_hdr(skb)->check =
+ ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+- &ipv6_hdr(skb)->daddr,
+- 0, IPPROTO_TCP, 0);
++ &ipv6_hdr(skb)->daddr,
++ 0, IPPROTO_TCP, 0);
+ adapter->hw_tso6_ctxt++;
+ }
+
+@@ -3008,7 +3293,7 @@ static int ixgbe_tso(struct ixgbe_adapte
+ vlan_macip_lens |=
+ (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
+ vlan_macip_lens |= ((skb_network_offset(skb)) <<
+- IXGBE_ADVTXD_MACLEN_SHIFT);
++ IXGBE_ADVTXD_MACLEN_SHIFT);
+ *hdr_len += skb_network_offset(skb);
+ vlan_macip_lens |=
+ (skb_transport_header(skb) - skb_network_header(skb));
+@@ -3018,8 +3303,8 @@ static int ixgbe_tso(struct ixgbe_adapte
+ context_desc->seqnum_seed = 0;
+
+ /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
+- type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
+- IXGBE_ADVTXD_DTYP_CTXT);
++ type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
++ IXGBE_ADVTXD_DTYP_CTXT);
+
+ if (skb->protocol == htons(ETH_P_IP))
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
+@@ -3027,9 +3312,11 @@ static int ixgbe_tso(struct ixgbe_adapte
+ context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
+
+ /* MSS L4LEN IDX */
+- mss_l4len_idx |=
++ mss_l4len_idx =
+ (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
+ mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
++ /* use index 1 for TSO */
++ mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
+ context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
+
+ tx_buffer_info->time_stamp = jiffies;
+@@ -3046,8 +3333,8 @@ static int ixgbe_tso(struct ixgbe_adapte
+ }
+
+ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
+- struct ixgbe_ring *tx_ring,
+- struct sk_buff *skb, u32 tx_flags)
++ struct ixgbe_ring *tx_ring,
++ struct sk_buff *skb, u32 tx_flags)
+ {
+ struct ixgbe_adv_tx_context_desc *context_desc;
+ unsigned int i;
+@@ -3064,16 +3351,16 @@ static bool ixgbe_tx_csum(struct ixgbe_a
+ vlan_macip_lens |=
+ (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
+ vlan_macip_lens |= (skb_network_offset(skb) <<
+- IXGBE_ADVTXD_MACLEN_SHIFT);
++ IXGBE_ADVTXD_MACLEN_SHIFT);
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ vlan_macip_lens |= (skb_transport_header(skb) -
+- skb_network_header(skb));
++ skb_network_header(skb));
+
+ context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
+ context_desc->seqnum_seed = 0;
+
+ type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
+- IXGBE_ADVTXD_DTYP_CTXT);
++ IXGBE_ADVTXD_DTYP_CTXT);
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ switch (skb->protocol) {
+@@ -3081,16 +3368,14 @@ static bool ixgbe_tx_csum(struct ixgbe_a
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
+ if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+ type_tucmd_mlhl |=
+- IXGBE_ADVTXD_TUCMD_L4T_TCP;
++ IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ break;
+-
+ case __constant_htons(ETH_P_IPV6):
+ /* XXX what about other V6 headers?? */
+ if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+ type_tucmd_mlhl |=
+- IXGBE_ADVTXD_TUCMD_L4T_TCP;
++ IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ break;
+-
+ default:
+ if (unlikely(net_ratelimit())) {
+ DPRINTK(PROBE, WARNING,
+@@ -3102,10 +3387,12 @@ static bool ixgbe_tx_csum(struct ixgbe_a
+ }
+
+ context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
++ /* use index zero for tx checksum offload */
+ context_desc->mss_l4len_idx = 0;
+
+ tx_buffer_info->time_stamp = jiffies;
+ tx_buffer_info->next_to_watch = i;
++
+ adapter->hw_csum_tx_good++;
+ i++;
+ if (i == tx_ring->count)
+@@ -3114,12 +3401,13 @@ static bool ixgbe_tx_csum(struct ixgbe_a
+
+ return true;
+ }
++
+ return false;
+ }
+
+ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
+- struct ixgbe_ring *tx_ring,
+- struct sk_buff *skb, unsigned int first)
++ struct ixgbe_ring *tx_ring,
++ struct sk_buff *skb, unsigned int first)
+ {
+ struct ixgbe_tx_buffer *tx_buffer_info;
+ unsigned int len = skb->len;
+@@ -3137,8 +3425,8 @@ static int ixgbe_tx_map(struct ixgbe_ada
+
+ tx_buffer_info->length = size;
+ tx_buffer_info->dma = pci_map_single(adapter->pdev,
+- skb->data + offset,
+- size, PCI_DMA_TODEVICE);
++ skb->data + offset,
++ size, PCI_DMA_TODEVICE);
+ tx_buffer_info->time_stamp = jiffies;
+ tx_buffer_info->next_to_watch = i;
+
+@@ -3163,9 +3451,10 @@ static int ixgbe_tx_map(struct ixgbe_ada
+
+ tx_buffer_info->length = size;
+ tx_buffer_info->dma = pci_map_page(adapter->pdev,
+- frag->page,
+- offset,
+- size, PCI_DMA_TODEVICE);
++ frag->page,
++ offset,
++ size,
++ PCI_DMA_TODEVICE);
+ tx_buffer_info->time_stamp = jiffies;
+ tx_buffer_info->next_to_watch = i;
+
+@@ -3188,8 +3477,8 @@ static int ixgbe_tx_map(struct ixgbe_ada
+ }
+
+ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
+- struct ixgbe_ring *tx_ring,
+- int tx_flags, int count, u32 paylen, u8 hdr_len)
++ struct ixgbe_ring *tx_ring,
++ int tx_flags, int count, u32 paylen, u8 hdr_len)
+ {
+ union ixgbe_adv_tx_desc *tx_desc = NULL;
+ struct ixgbe_tx_buffer *tx_buffer_info;
+@@ -3208,15 +3497,17 @@ static void ixgbe_tx_queue(struct ixgbe_
+ cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
+
+ olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
+- IXGBE_ADVTXD_POPTS_SHIFT;
++ IXGBE_ADVTXD_POPTS_SHIFT;
+
++ /* use index 1 context for tso */
++ olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
+ if (tx_flags & IXGBE_TX_FLAGS_IPV4)
+ olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
+- IXGBE_ADVTXD_POPTS_SHIFT;
++ IXGBE_ADVTXD_POPTS_SHIFT;
+
+ } else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
+ olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
+- IXGBE_ADVTXD_POPTS_SHIFT;
++ IXGBE_ADVTXD_POPTS_SHIFT;
+
+ olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
+@@ -3226,9 +3517,8 @@ static void ixgbe_tx_queue(struct ixgbe_
+ tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+ tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
+ tx_desc->read.cmd_type_len =
+- cpu_to_le32(cmd_type_len | tx_buffer_info->length);
++ cpu_to_le32(cmd_type_len | tx_buffer_info->length);
+ tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+-
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+@@ -3249,7 +3539,7 @@ static void ixgbe_tx_queue(struct ixgbe_
+ }
+
+ static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
+- struct ixgbe_ring *tx_ring, int size)
++ struct ixgbe_ring *tx_ring, int size)
+ {
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+@@ -3265,61 +3555,52 @@ static int __ixgbe_maybe_stop_tx(struct
+ return -EBUSY;
+
+ /* A reprieve! - use start_queue because it doesn't call schedule */
+- netif_wake_subqueue(netdev, tx_ring->queue_index);
++ netif_start_subqueue(netdev, tx_ring->queue_index);
+ ++adapter->restart_queue;
+ return 0;
+ }
+
+ static int ixgbe_maybe_stop_tx(struct net_device *netdev,
+- struct ixgbe_ring *tx_ring, int size)
++ struct ixgbe_ring *tx_ring, int size)
+ {
+ if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
+ return 0;
+ return __ixgbe_maybe_stop_tx(netdev, tx_ring, size);
+ }
+
+-
+ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+ {
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_ring *tx_ring;
+- unsigned int len = skb->len;
+ unsigned int first;
+ unsigned int tx_flags = 0;
+ u8 hdr_len = 0;
+ int r_idx = 0, tso;
+- unsigned int mss = 0;
+ int count = 0;
+ unsigned int f;
+- unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
+- len -= skb->data_len;
++
+ r_idx = (adapter->num_tx_queues - 1) & skb->queue_mapping;
+ tx_ring = &adapter->tx_ring[r_idx];
+
+-
+- if (skb->len <= 0) {
+- dev_kfree_skb(skb);
+- return NETDEV_TX_OK;
++ if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
++ tx_flags |= vlan_tx_tag_get(skb);
++ tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
++ tx_flags |= IXGBE_TX_FLAGS_VLAN;
+ }
+- mss = skb_shinfo(skb)->gso_size;
+-
+- if (mss)
+- count++;
+- else if (skb->ip_summed == CHECKSUM_PARTIAL)
++ /* three things can cause us to need a context descriptor */
++ if (skb_is_gso(skb) ||
++ (skb->ip_summed == CHECKSUM_PARTIAL) ||
++ (tx_flags & IXGBE_TX_FLAGS_VLAN))
+ count++;
+
+- count += TXD_USE_COUNT(len);
+- for (f = 0; f < nr_frags; f++)
++ count += TXD_USE_COUNT(skb_headlen(skb));
++ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+ count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+
+ if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
+ adapter->tx_busy++;
+ return NETDEV_TX_BUSY;
+ }
+- if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
+- tx_flags |= IXGBE_TX_FLAGS_VLAN;
+- tx_flags |= (vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT);
+- }
+
+ if (skb->protocol == htons(ETH_P_IP))
+ tx_flags |= IXGBE_TX_FLAGS_IPV4;
+@@ -3333,12 +3614,12 @@ static int ixgbe_xmit_frame(struct sk_bu
+ if (tso)
+ tx_flags |= IXGBE_TX_FLAGS_TSO;
+ else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
+- (skb->ip_summed == CHECKSUM_PARTIAL))
++ (skb->ip_summed == CHECKSUM_PARTIAL))
+ tx_flags |= IXGBE_TX_FLAGS_CSUM;
+
+ ixgbe_tx_queue(adapter, tx_ring, tx_flags,
+- ixgbe_tx_map(adapter, tx_ring, skb, first),
+- skb->len, hdr_len);
++ ixgbe_tx_map(adapter, tx_ring, skb, first),
++ skb->len, hdr_len);
+
+ netdev->trans_start = jiffies;
+
+@@ -3372,15 +3653,16 @@ static struct net_device_stats *ixgbe_ge
+ static int ixgbe_set_mac(struct net_device *netdev, void *p)
+ {
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
++ struct ixgbe_hw *hw = &adapter->hw;
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+- memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
++ memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
+
+- ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
++ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+
+ return 0;
+ }
+@@ -3404,28 +3686,19 @@ static void ixgbe_netpoll(struct net_dev
+ #endif
+
+ /**
+- * ixgbe_napi_add_all - prep napi structs for use
+- * @adapter: private struct
+- * helper function to napi_add each possible q_vector->napi
+- */
+-static void ixgbe_napi_add_all(struct ixgbe_adapter *adapter)
++ * ixgbe_link_config - set up initial link with default speed and duplex
++ * @hw: pointer to private hardware struct
++ *
++ * Returns 0 on success, negative on failure
++ **/
++static int ixgbe_link_config(struct ixgbe_hw *hw)
+ {
+- int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+- int (*poll)(struct napi_struct *, int);
++ u32 autoneg = IXGBE_LINK_SPEED_10GB_FULL;
+
+- if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+- poll = &ixgbe_clean_rxonly;
+- } else {
+- poll = &ixgbe_poll;
+- /* only one q_vector for legacy modes */
+- q_vectors = 1;
+- }
++ /* must always autoneg for both 1G and 10G link */
++ hw->mac.autoneg = true;
+
+- for (i = 0; i < q_vectors; i++) {
+- struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
+- netif_napi_add(adapter->netdev, &q_vector->napi,
+- (*poll), 64);
+- }
++ return hw->mac.ops.setup_link_speed(hw, autoneg, true, true);
+ }
+
+ /**
+@@ -3440,17 +3713,16 @@ static void ixgbe_napi_add_all(struct ix
+ * and a hardware reset occur.
+ **/
+ static int __devinit ixgbe_probe(struct pci_dev *pdev,
+- const struct pci_device_id *ent)
++ const struct pci_device_id *ent)
+ {
+ struct net_device *netdev;
+ struct ixgbe_adapter *adapter = NULL;
+ struct ixgbe_hw *hw;
+ const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
+- unsigned long mmio_start, mmio_len;
+ static int cards_found;
+ int i, err, pci_using_dac;
+ u16 link_status, link_speed, link_width;
+- u32 part_num;
++ u32 part_num, eec;
+
+ err = pci_enable_device(pdev);
+ if (err)
+@@ -3465,7 +3737,7 @@ static int __devinit ixgbe_probe(struct
+ err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+ if (err) {
+ dev_err(&pdev->dev, "No usable DMA "
+- "configuration, aborting\n");
++ "configuration, aborting\n");
+ goto err_dma;
+ }
+ }
+@@ -3498,10 +3770,8 @@ static int __devinit ixgbe_probe(struct
+ hw->back = adapter;
+ adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
+
+- mmio_start = pci_resource_start(pdev, 0);
+- mmio_len = pci_resource_len(pdev, 0);
+-
+- hw->hw_addr = ioremap(mmio_start, mmio_len);
++ hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
++ pci_resource_len(pdev, 0));
+ if (!hw->hw_addr) {
+ err = -EIO;
+ goto err_ioremap;
+@@ -3516,7 +3786,8 @@ static int __devinit ixgbe_probe(struct
+ netdev->stop = &ixgbe_close;
+ netdev->hard_start_xmit = &ixgbe_xmit_frame;
+ netdev->get_stats = &ixgbe_get_stats;
+- netdev->set_multicast_list = &ixgbe_set_multi;
++ netdev->set_rx_mode = &ixgbe_set_rx_mode;
++ netdev->set_multicast_list = &ixgbe_set_rx_mode;
+ netdev->set_mac_address = &ixgbe_set_mac;
+ netdev->change_mtu = &ixgbe_change_mtu;
+ ixgbe_set_ethtool_ops(netdev);
+@@ -3530,22 +3801,23 @@ static int __devinit ixgbe_probe(struct
+ #endif
+ strcpy(netdev->name, pci_name(pdev));
+
+- netdev->mem_start = mmio_start;
+- netdev->mem_end = mmio_start + mmio_len;
+-
+ adapter->bd_number = cards_found;
+
+- /* PCI config space info */
+- hw->vendor_id = pdev->vendor;
+- hw->device_id = pdev->device;
+- hw->revision_id = pdev->revision;
+- hw->subsystem_vendor_id = pdev->subsystem_vendor;
+- hw->subsystem_device_id = pdev->subsystem_device;
+-
+ /* Setup hw api */
+ memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
+ hw->mac.type = ii->mac;
+
++ /* EEPROM */
++ memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
++ eec = IXGBE_READ_REG(hw, IXGBE_EEC);
++ /* If EEPROM is valid (bit 8 = 1), use default; otherwise use bit bang */
++ if (!(eec & (1 << 8)))
++ hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
++
++ /* PHY */
++ memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
++ /* phy->sfp_type = ixgbe_sfp_type_unknown; */
++
+ err = ii->get_invariants(hw);
+ if (err)
+ goto err_hw_init;
+@@ -3555,26 +3827,36 @@ static int __devinit ixgbe_probe(struct
+ if (err)
+ goto err_sw_init;
+
++ /* reset_hw fills in the perm_addr as well */
++ err = hw->mac.ops.reset_hw(hw);
++ if (err) {
++ dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err);
++ goto err_sw_init;
++ }
++
+ netdev->features = NETIF_F_SG |
+- NETIF_F_HW_CSUM |
+- NETIF_F_HW_VLAN_TX |
+- NETIF_F_HW_VLAN_RX |
+- NETIF_F_HW_VLAN_FILTER;
++ NETIF_F_IP_CSUM |
++ NETIF_F_HW_VLAN_TX |
++ NETIF_F_HW_VLAN_RX |
++ NETIF_F_HW_VLAN_FILTER;
+
+- netdev->features |= NETIF_F_LRO;
++ netdev->features |= NETIF_F_IPV6_CSUM;
+ netdev->features |= NETIF_F_TSO;
+ netdev->features |= NETIF_F_TSO6;
++#ifdef CONFIG_IXGBE_LRO
++ netdev->features |= NETIF_F_LRO;
++#endif
+
+ netdev->vlan_features |= NETIF_F_TSO;
+ netdev->vlan_features |= NETIF_F_TSO6;
+- netdev->vlan_features |= NETIF_F_HW_CSUM;
++ netdev->vlan_features |= NETIF_F_IP_CSUM;
+ netdev->vlan_features |= NETIF_F_SG;
+
+ if (pci_using_dac)
+ netdev->features |= NETIF_F_HIGHDMA;
+
+ /* make sure the EEPROM is good */
+- if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
++ if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
+ dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
+ err = -EIO;
+ goto err_eeprom;
+@@ -3583,7 +3865,8 @@ static int __devinit ixgbe_probe(struct
+ memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
+ memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);
+
+- if (ixgbe_validate_mac_addr(netdev->dev_addr)) {
++ if (ixgbe_validate_mac_addr(netdev->perm_addr)) {
++ dev_err(&pdev->dev, "invalid MAC address\n");
+ err = -EIO;
+ goto err_eeprom;
+ }
+@@ -3593,13 +3876,7 @@ static int __devinit ixgbe_probe(struct
+ adapter->watchdog_timer.data = (unsigned long)adapter;
+
+ INIT_WORK(&adapter->reset_task, ixgbe_reset_task);
+-
+- /* initialize default flow control settings */
+- hw->fc.original_type = ixgbe_fc_full;
+- hw->fc.type = ixgbe_fc_full;
+- hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
+- hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
+- hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
++ INIT_WORK(&adapter->watchdog_task, ixgbe_watchdog_task);
+
+ err = ixgbe_init_interrupt_scheme(adapter);
+ if (err)
+@@ -3610,32 +3887,39 @@ static int __devinit ixgbe_probe(struct
+ link_speed = link_status & IXGBE_PCI_LINK_SPEED;
+ link_width = link_status & IXGBE_PCI_LINK_WIDTH;
+ dev_info(&pdev->dev, "(PCI Express:%s:%s) "
+- "%02x:%02x:%02x:%02x:%02x:%02x\n",
+- ((link_speed == IXGBE_PCI_LINK_SPEED_5000) ? "5.0Gb/s" :
+- (link_speed == IXGBE_PCI_LINK_SPEED_2500) ? "2.5Gb/s" :
+- "Unknown"),
+- ((link_width == IXGBE_PCI_LINK_WIDTH_8) ? "Width x8" :
+- (link_width == IXGBE_PCI_LINK_WIDTH_4) ? "Width x4" :
+- (link_width == IXGBE_PCI_LINK_WIDTH_2) ? "Width x2" :
+- (link_width == IXGBE_PCI_LINK_WIDTH_1) ? "Width x1" :
+- "Unknown"),
+- netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
+- netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);
+- ixgbe_read_part_num(hw, &part_num);
++ "%02x:%02x:%02x:%02x:%02x:%02x\n",
++ ((link_speed == IXGBE_PCI_LINK_SPEED_5000) ? "5.0Gb/s" :
++ (link_speed == IXGBE_PCI_LINK_SPEED_2500) ? "2.5Gb/s" :
++ "Unknown"),
++ ((link_width == IXGBE_PCI_LINK_WIDTH_8) ? "Width x8" :
++ (link_width == IXGBE_PCI_LINK_WIDTH_4) ? "Width x4" :
++ (link_width == IXGBE_PCI_LINK_WIDTH_2) ? "Width x2" :
++ (link_width == IXGBE_PCI_LINK_WIDTH_1) ? "Width x1" :
++ "Unknown"),
++ netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
++ netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);
++ ixgbe_read_pba_num_generic(hw, &part_num);
+ dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
+- hw->mac.type, hw->phy.type,
+- (part_num >> 8), (part_num & 0xff));
++ hw->mac.type, hw->phy.type,
++ (part_num >> 8), (part_num & 0xff));
+
+ if (link_width <= IXGBE_PCI_LINK_WIDTH_4) {
+ dev_warn(&pdev->dev, "PCI-Express bandwidth available for "
+- "this card is not sufficient for optimal "
+- "performance.\n");
++ "this card is not sufficient for optimal "
++ "performance.\n");
+ dev_warn(&pdev->dev, "For optimal performance a x8 "
+- "PCI-Express slot is required.\n");
++ "PCI-Express slot is required.\n");
+ }
+
+ /* reset the hardware with the new settings */
+- ixgbe_start_hw(hw);
++ hw->mac.ops.start_hw(hw);
++
++ /* link_config depends on start_hw being called at least once */
++ err = ixgbe_link_config(hw);
++ if (err) {
++ dev_err(&pdev->dev, "setup_link_speed FAILED %d\n", err);
++ goto err_register;
++ }
+
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+@@ -3647,7 +3931,7 @@ static int __devinit ixgbe_probe(struct
+ if (err)
+ goto err_register;
+
+-#ifdef CONFIG_DCA
++#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+ if (dca_add_requester(&pdev->dev) == 0) {
+ adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
+ /* always use CB2 mode, difference is masked
+@@ -3697,7 +3981,7 @@ static void __devexit ixgbe_remove(struc
+
+ flush_scheduled_work();
+
+-#ifdef CONFIG_DCA
++#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
+ adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
+ dca_remove_requester(&pdev->dev);
+@@ -3715,6 +3999,7 @@ static void __devexit ixgbe_remove(struc
+ pci_release_regions(pdev);
+
+ DPRINTK(PROBE, INFO, "complete\n");
++ ixgbe_napi_del_all(adapter);
+ kfree(adapter->tx_ring);
+ kfree(adapter->rx_ring);
+
+@@ -3732,7 +4017,7 @@ static void __devexit ixgbe_remove(struc
+ * this device has been detected.
+ */
+ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
+- pci_channel_state_t state)
++ pci_channel_state_t state)
+ {
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct ixgbe_adapter *adapter = netdev->priv;
+@@ -3743,7 +4028,7 @@ static pci_ers_result_t ixgbe_io_error_d
+ ixgbe_down(adapter);
+ pci_disable_device(pdev);
+
+- /* Request a slot slot reset. */
++ /* Request a slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+ }
+
+@@ -3760,7 +4045,7 @@ static pci_ers_result_t ixgbe_io_slot_re
+
+ if (pci_enable_device(pdev)) {
+ DPRINTK(PROBE, ERR,
+- "Cannot re-enable PCI device after reset.\n");
++ "Cannot re-enable PCI device after reset.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ pci_set_master(pdev);
+@@ -3794,7 +4079,6 @@ static void ixgbe_io_resume(struct pci_d
+ }
+
+ netif_device_attach(netdev);
+-
+ }
+
+ static struct pci_error_handlers ixgbe_err_handler = {
+@@ -3830,13 +4114,14 @@ static int __init ixgbe_init_module(void
+
+ printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright);
+
+-#ifdef CONFIG_DCA
++#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+ dca_register_notify(&dca_notifier);
+
+ #endif
+ ret = pci_register_driver(&ixgbe_driver);
+ return ret;
+ }
++
+ module_init(ixgbe_init_module);
+
+ /**
+@@ -3847,24 +4132,24 @@ module_init(ixgbe_init_module);
+ **/
+ static void __exit ixgbe_exit_module(void)
+ {
+-#ifdef CONFIG_DCA
++#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+ dca_unregister_notify(&dca_notifier);
+ #endif
+ pci_unregister_driver(&ixgbe_driver);
+ }
+
+-#ifdef CONFIG_DCA
++#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+ static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
+- void *p)
++ void *p)
+ {
+ int ret_val;
+
+ ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
+- __ixgbe_notify_dca);
++ __ixgbe_notify_dca);
+
+ return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
+ }
+-#endif /* CONFIG_DCA */
++#endif /* CONFIG_DCA or CONFIG_DCA_MODULE */
+
+ module_exit(ixgbe_exit_module);
+
+--- a/drivers/net/ixgbe/ixgbe_phy.c
++++ b/drivers/net/ixgbe/ixgbe_phy.c
+@@ -1,7 +1,7 @@
+ /*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+- Copyright(c) 1999 - 2007 Intel Corporation.
++ Copyright(c) 1999 - 2008 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+@@ -20,7 +20,6 @@
+ the file called "COPYING".
+
+ Contact Information:
+- Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+@@ -33,32 +32,36 @@
+ #include "ixgbe_common.h"
+ #include "ixgbe_phy.h"
+
++static bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr);
+ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
+ static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
+-static bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr);
+-static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
+- u32 device_type, u16 phy_data);
+
+ /**
+- * ixgbe_identify_phy - Get physical layer module
++ * ixgbe_identify_phy_generic - Get physical layer module
+ * @hw: pointer to hardware structure
+ *
+ * Determines the physical layer module found on the current adapter.
+ **/
+-s32 ixgbe_identify_phy(struct ixgbe_hw *hw)
++s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
+ {
+ s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+ u32 phy_addr;
+
+- for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
+- if (ixgbe_validate_phy_addr(hw, phy_addr)) {
+- hw->phy.addr = phy_addr;
+- ixgbe_get_phy_id(hw);
+- hw->phy.type = ixgbe_get_phy_type_from_id(hw->phy.id);
+- status = 0;
+- break;
++ if (hw->phy.type == ixgbe_phy_unknown) {
++ for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
++ if (ixgbe_validate_phy_addr(hw, phy_addr)) {
++ hw->phy.addr = phy_addr;
++ ixgbe_get_phy_id(hw);
++ hw->phy.type =
++ ixgbe_get_phy_type_from_id(hw->phy.id);
++ status = 0;
++ break;
++ }
+ }
++ } else {
++ status = 0;
+ }
++
+ return status;
+ }
+
+@@ -73,10 +76,8 @@ static bool ixgbe_validate_phy_addr(stru
+ bool valid = false;
+
+ hw->phy.addr = phy_addr;
+- ixgbe_read_phy_reg(hw,
+- IXGBE_MDIO_PHY_ID_HIGH,
+- IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+- &phy_id);
++ hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH,
++ IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_id);
+
+ if (phy_id != 0xFFFF && phy_id != 0x0)
+ valid = true;
+@@ -95,21 +96,18 @@ static s32 ixgbe_get_phy_id(struct ixgbe
+ u16 phy_id_high = 0;
+ u16 phy_id_low = 0;
+
+- status = ixgbe_read_phy_reg(hw,
+- IXGBE_MDIO_PHY_ID_HIGH,
+- IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+- &phy_id_high);
++ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH,
++ IXGBE_MDIO_PMA_PMD_DEV_TYPE,
++ &phy_id_high);
+
+ if (status == 0) {
+ hw->phy.id = (u32)(phy_id_high << 16);
+- status = ixgbe_read_phy_reg(hw,
+- IXGBE_MDIO_PHY_ID_LOW,
+- IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+- &phy_id_low);
++ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_LOW,
++ IXGBE_MDIO_PMA_PMD_DEV_TYPE,
++ &phy_id_low);
+ hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK);
+ hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK);
+ }
+-
+ return status;
+ }
+
+@@ -123,9 +121,6 @@ static enum ixgbe_phy_type ixgbe_get_phy
+ enum ixgbe_phy_type phy_type;
+
+ switch (phy_id) {
+- case TN1010_PHY_ID:
+- phy_type = ixgbe_phy_tn;
+- break;
+ case QT2022_PHY_ID:
+ phy_type = ixgbe_phy_qt;
+ break;
+@@ -138,32 +133,31 @@ static enum ixgbe_phy_type ixgbe_get_phy
+ }
+
+ /**
+- * ixgbe_reset_phy - Performs a PHY reset
++ * ixgbe_reset_phy_generic - Performs a PHY reset
+ * @hw: pointer to hardware structure
+ **/
+-s32 ixgbe_reset_phy(struct ixgbe_hw *hw)
++s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
+ {
+ /*
+ * Perform soft PHY reset to the PHY_XS.
+ * This will cause a soft reset to the PHY
+ */
+- return ixgbe_write_phy_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
+- IXGBE_MDIO_PHY_XS_DEV_TYPE,
+- IXGBE_MDIO_PHY_XS_RESET);
++ return hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
++ IXGBE_MDIO_PHY_XS_DEV_TYPE,
++ IXGBE_MDIO_PHY_XS_RESET);
+ }
+
+ /**
+- * ixgbe_read_phy_reg - Reads a value from a specified PHY register
++ * ixgbe_read_phy_reg_generic - Reads a value from a specified PHY register
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit address of PHY register to read
+ * @phy_data: Pointer to read data from PHY register
+ **/
+-s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
+- u32 device_type, u16 *phy_data)
++s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
++ u32 device_type, u16 *phy_data)
+ {
+ u32 command;
+ u32 i;
+- u32 timeout = 10;
+ u32 data;
+ s32 status = 0;
+ u16 gssr;
+@@ -179,9 +173,9 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *
+ if (status == 0) {
+ /* Setup and write the address cycle command */
+ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
+- (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+- (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+- (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
++ (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
++ (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
++ (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
+
+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+@@ -190,7 +184,7 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *
+ * The MDI Command bit will clear when the operation is
+ * complete
+ */
+- for (i = 0; i < timeout; i++) {
++ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ udelay(10);
+
+ command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+@@ -210,9 +204,9 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *
+ * command
+ */
+ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
+- (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+- (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+- (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));
++ (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
++ (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
++ (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));
+
+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+@@ -221,7 +215,7 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *
+ * completed. The MDI Command bit will clear when the
+ * operation is complete
+ */
+- for (i = 0; i < timeout; i++) {
++ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ udelay(10);
+
+ command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+@@ -231,8 +225,7 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *
+ }
+
+ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+- hw_dbg(hw,
+- "PHY read command didn't complete\n");
++ hw_dbg(hw, "PHY read command didn't complete\n");
+ status = IXGBE_ERR_PHY;
+ } else {
+ /*
+@@ -247,22 +240,22 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *
+
+ ixgbe_release_swfw_sync(hw, gssr);
+ }
++
+ return status;
+ }
+
+ /**
+- * ixgbe_write_phy_reg - Writes a value to specified PHY register
++ * ixgbe_write_phy_reg_generic - Writes a value to specified PHY register
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to write
+ * @device_type: 5 bit device type
+ * @phy_data: Data to write to the PHY register
+ **/
+-static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
+- u32 device_type, u16 phy_data)
++s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
++ u32 device_type, u16 phy_data)
+ {
+ u32 command;
+ u32 i;
+- u32 timeout = 10;
+ s32 status = 0;
+ u16 gssr;
+
+@@ -280,9 +273,9 @@ static s32 ixgbe_write_phy_reg(struct ix
+
+ /* Setup and write the address cycle command */
+ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
+- (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+- (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+- (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
++ (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
++ (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
++ (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
+
+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+@@ -291,19 +284,19 @@ static s32 ixgbe_write_phy_reg(struct ix
+ * The MDI Command bit will clear when the operation is
+ * complete
+ */
+- for (i = 0; i < timeout; i++) {
++ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ udelay(10);
+
+ command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+
+- if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) {
+- hw_dbg(hw, "PHY address cmd didn't complete\n");
++ if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+ break;
+- }
+ }
+
+- if ((command & IXGBE_MSCA_MDI_COMMAND) != 0)
++ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
++ hw_dbg(hw, "PHY address cmd didn't complete\n");
+ status = IXGBE_ERR_PHY;
++ }
+
+ if (status == 0) {
+ /*
+@@ -311,9 +304,9 @@ static s32 ixgbe_write_phy_reg(struct ix
+ * command
+ */
+ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
+- (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+- (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+- (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));
++ (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
++ (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
++ (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));
+
+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+@@ -322,20 +315,19 @@ static s32 ixgbe_write_phy_reg(struct ix
+ * completed. The MDI Command bit will clear when the
+ * operation is complete
+ */
+- for (i = 0; i < timeout; i++) {
++ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ udelay(10);
+
+ command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+
+- if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) {
+- hw_dbg(hw, "PHY write command did not "
+- "complete.\n");
++ if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+ break;
+- }
+ }
+
+- if ((command & IXGBE_MSCA_MDI_COMMAND) != 0)
++ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
++ hw_dbg(hw, "PHY address cmd didn't complete\n");
+ status = IXGBE_ERR_PHY;
++ }
+ }
+
+ ixgbe_release_swfw_sync(hw, gssr);
+@@ -345,67 +337,54 @@ static s32 ixgbe_write_phy_reg(struct ix
+ }
+
+ /**
+- * ixgbe_setup_tnx_phy_link - Set and restart autoneg
++ * ixgbe_setup_phy_link_generic - Set and restart autoneg
+ * @hw: pointer to hardware structure
+ *
+ * Restart autonegotiation and PHY and waits for completion.
+ **/
+-s32 ixgbe_setup_tnx_phy_link(struct ixgbe_hw *hw)
++s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
+ {
+ s32 status = IXGBE_NOT_IMPLEMENTED;
+ u32 time_out;
+ u32 max_time_out = 10;
+- u16 autoneg_speed_selection_register = 0x10;
+- u16 autoneg_restart_mask = 0x0200;
+- u16 autoneg_complete_mask = 0x0020;
+- u16 autoneg_reg = 0;
++ u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
+
+ /*
+ * Set advertisement settings in PHY based on autoneg_advertised
+ * settings. If autoneg_advertised = 0, then advertise default values
+- * txn devices cannot be "forced" to a autoneg 10G and fail. But can
++ * tnx devices cannot be "forced" to an autoneg 10G and fail, but they can
+ * for a 1G.
+ */
+- ixgbe_read_phy_reg(hw,
+- autoneg_speed_selection_register,
+- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+- &autoneg_reg);
++ hw->phy.ops.read_reg(hw, IXGBE_MII_SPEED_SELECTION_REG,
++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
+
+ if (hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_1GB_FULL)
+ autoneg_reg &= 0xEFFF; /* 0 in bit 12 is 1G operation */
+ else
+ autoneg_reg |= 0x1000; /* 1 in bit 12 is 10G/1G operation */
+
+- ixgbe_write_phy_reg(hw,
+- autoneg_speed_selection_register,
+- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+- autoneg_reg);
+-
++ hw->phy.ops.write_reg(hw, IXGBE_MII_SPEED_SELECTION_REG,
++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
+
+ /* Restart PHY autonegotiation and wait for completion */
+- ixgbe_read_phy_reg(hw,
+- IXGBE_MDIO_AUTO_NEG_CONTROL,
+- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+- &autoneg_reg);
+-
+- autoneg_reg |= autoneg_restart_mask;
+-
+- ixgbe_write_phy_reg(hw,
+- IXGBE_MDIO_AUTO_NEG_CONTROL,
+- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+- autoneg_reg);
++ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
++
++ autoneg_reg |= IXGBE_MII_RESTART;
++
++ hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
+
+ /* Wait for autonegotiation to finish */
+ for (time_out = 0; time_out < max_time_out; time_out++) {
+ udelay(10);
+ /* Restart PHY autonegotiation and wait for completion */
+- status = ixgbe_read_phy_reg(hw,
+- IXGBE_MDIO_AUTO_NEG_STATUS,
+- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+- &autoneg_reg);
++ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
++ &autoneg_reg);
+
+- autoneg_reg &= autoneg_complete_mask;
+- if (autoneg_reg == autoneg_complete_mask) {
++ autoneg_reg &= IXGBE_MII_AUTONEG_COMPLETE;
++ if (autoneg_reg == IXGBE_MII_AUTONEG_COMPLETE) {
+ status = 0;
+ break;
+ }
+@@ -418,64 +397,17 @@ s32 ixgbe_setup_tnx_phy_link(struct ixgb
+ }
+
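
The speed advertisement above comes down to a single bit: bit 12 of the MII
speed-selection register (IXGBE_MII_SPEED_SELECTION_REG, 0x10) is cleared
for 1G-only operation and set for 10G/1G. The same read-modify-write,
written as a standalone sketch through the new PHY ops table (the helper
name is illustrative):

	static s32 ixgbe_advertise_1g_only(struct ixgbe_hw *hw)
	{
		u16 reg = 0;
		s32 status;

		status = hw->phy.ops.read_reg(hw, IXGBE_MII_SPEED_SELECTION_REG,
		                              IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
		if (status != 0)
			return status;

		reg &= 0xEFFF;	/* 0 in bit 12 selects 1G operation */

		return hw->phy.ops.write_reg(hw, IXGBE_MII_SPEED_SELECTION_REG,
		                             IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg);
	}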
+ /**
+- * ixgbe_check_tnx_phy_link - Determine link and speed status
+- * @hw: pointer to hardware structure
+- *
+- * Reads the VS1 register to determine if link is up and the current speed for
+- * the PHY.
+- **/
+-s32 ixgbe_check_tnx_phy_link(struct ixgbe_hw *hw, u32 *speed,
+- bool *link_up)
+-{
+- s32 status = 0;
+- u32 time_out;
+- u32 max_time_out = 10;
+- u16 phy_link = 0;
+- u16 phy_speed = 0;
+- u16 phy_data = 0;
+-
+- /* Initialize speed and link to default case */
+- *link_up = false;
+- *speed = IXGBE_LINK_SPEED_10GB_FULL;
+-
+- /*
+- * Check current speed and link status of the PHY register.
+- * This is a vendor specific register and may have to
+- * be changed for other copper PHYs.
+- */
+- for (time_out = 0; time_out < max_time_out; time_out++) {
+- udelay(10);
+- if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) {
+- *link_up = true;
+- if (phy_speed ==
+- IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS)
+- *speed = IXGBE_LINK_SPEED_1GB_FULL;
+- break;
+- } else {
+- status = ixgbe_read_phy_reg(hw,
+- IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS,
+- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+- &phy_data);
+- phy_link = phy_data &
+- IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS;
+- phy_speed = phy_data &
+- IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS;
+- }
+- }
+-
+- return status;
+-}
+-
+-/**
+- * ixgbe_setup_tnx_phy_link_speed - Sets the auto advertised capabilities
++ * ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg: true if autonegotiation enabled
+ **/
+-s32 ixgbe_setup_tnx_phy_link_speed(struct ixgbe_hw *hw, u32 speed,
+- bool autoneg,
+- bool autoneg_wait_to_complete)
++s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
++ ixgbe_link_speed speed,
++ bool autoneg,
++ bool autoneg_wait_to_complete)
+ {
++
+ /*
+ * Clear autoneg_advertised and set new values based on input link
+ * speed.
+@@ -484,11 +416,13 @@ s32 ixgbe_setup_tnx_phy_link_speed(struc
+
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
++
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
+
+ /* Setup link based on the new speed settings */
+- ixgbe_setup_tnx_phy_link(hw);
++ hw->phy.ops.setup_link(hw);
+
+ return 0;
+ }
++
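
The exported speed setter takes the OR of the link-speed bits from
ixgbe_type.h, so a single call can advertise several rates. A hedged usage
sketch (autoneg enabled, no blocking wait):

	ixgbe_link_speed speed = IXGBE_LINK_SPEED_10GB_FULL |
	                         IXGBE_LINK_SPEED_1GB_FULL;

	/* on 82598 this equals IXGBE_LINK_SPEED_82598_AUTONEG */
	hw->phy.ops.setup_link_speed(hw, speed, true, false);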
+--- a/drivers/net/ixgbe/ixgbe_phy.h
++++ b/drivers/net/ixgbe/ixgbe_phy.h
+@@ -1,7 +1,7 @@
+ /*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+- Copyright(c) 1999 - 2007 Intel Corporation.
++ Copyright(c) 1999 - 2008 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+@@ -20,7 +20,6 @@
+ the file called "COPYING".
+
+ Contact Information:
+- Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+@@ -30,20 +29,52 @@
+ #define _IXGBE_PHY_H_
+
+ #include "ixgbe_type.h"
++#define IXGBE_I2C_EEPROM_DEV_ADDR 0xA0
+
+-s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw);
+-s32 ixgbe_check_phy_link(struct ixgbe_hw *hw, u32 *speed, bool *link_up);
+-s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, u32 speed, bool autoneg,
+- bool autoneg_wait_to_complete);
+-s32 ixgbe_identify_phy(struct ixgbe_hw *hw);
+-s32 ixgbe_reset_phy(struct ixgbe_hw *hw);
+-s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
+- u32 device_type, u16 *phy_data);
+-
+-/* PHY specific */
+-s32 ixgbe_setup_tnx_phy_link(struct ixgbe_hw *hw);
+-s32 ixgbe_check_tnx_phy_link(struct ixgbe_hw *hw, u32 *speed, bool *link_up);
+-s32 ixgbe_setup_tnx_phy_link_speed(struct ixgbe_hw *hw, u32 speed, bool autoneg,
+- bool autoneg_wait_to_complete);
++/* EEPROM byte offsets */
++#define IXGBE_SFF_IDENTIFIER 0x0
++#define IXGBE_SFF_IDENTIFIER_SFP 0x3
++#define IXGBE_SFF_VENDOR_OUI_BYTE0 0x25
++#define IXGBE_SFF_VENDOR_OUI_BYTE1 0x26
++#define IXGBE_SFF_VENDOR_OUI_BYTE2 0x27
++#define IXGBE_SFF_1GBE_COMP_CODES 0x6
++#define IXGBE_SFF_10GBE_COMP_CODES 0x3
++#define IXGBE_SFF_TRANSMISSION_MEDIA 0x9
++
++/* Bitmasks */
++#define IXGBE_SFF_TWIN_AX_CAPABLE 0x80
++#define IXGBE_SFF_1GBASESX_CAPABLE 0x1
++#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
++#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
++#define IXGBE_I2C_EEPROM_READ_MASK 0x100
++#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3
++#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0
++#define IXGBE_I2C_EEPROM_STATUS_PASS 0x1
++#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2
++#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
++
++/* Bit-shift macros */
++#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 24
++#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 16
++#define IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT 8
++
++/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */
++#define IXGBE_SFF_VENDOR_OUI_TYCO 0x00407600
++#define IXGBE_SFF_VENDOR_OUI_FTL 0x00906500
++#define IXGBE_SFF_VENDOR_OUI_AVAGO 0x00176A00
++
++
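The three OUI bytes read from module EEPROM offsets 0x25-0x27 are packed
into the documented 0x[byte0][byte1][byte2][00] layout before comparison
with the vendor constants above. A sketch of that packing, assuming the
bytes were already fetched through the read_i2c_eeprom op:

	u32 vendor_oui;
	u8 oui_bytes[3];	/* bytes 0x25, 0x26, 0x27 of the SFF EEPROM */

	vendor_oui = ((u32)oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
	             ((u32)oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
	             ((u32)oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT);

	if (vendor_oui == IXGBE_SFF_VENDOR_OUI_FTL)
		hw->phy.type = ixgbe_phy_sfp_ftl;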
++s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
++s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
++s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
++s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
++ u32 device_type, u16 *phy_data);
++s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
++ u32 device_type, u16 phy_data);
++s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw);
++s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
++ ixgbe_link_speed speed,
++ bool autoneg,
++ bool autoneg_wait_to_complete);
+
+ #endif /* _IXGBE_PHY_H_ */
+--- a/drivers/net/ixgbe/ixgbe_type.h
++++ b/drivers/net/ixgbe/ixgbe_type.h
+@@ -1,7 +1,7 @@
+ /*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+- Copyright(c) 1999 - 2007 Intel Corporation.
++ Copyright(c) 1999 - 2008 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+@@ -20,7 +20,6 @@
+ the file called "COPYING".
+
+ Contact Information:
+- Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+@@ -37,9 +36,9 @@
+ /* Device IDs */
+ #define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6
+ #define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7
+-#define IXGBE_DEV_ID_82598AT_DUAL_PORT 0x10C8
+ #define IXGBE_DEV_ID_82598EB_CX4 0x10DD
+ #define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC
++#define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4
+
+ /* General Registers */
+ #define IXGBE_CTRL 0x00000
+@@ -70,11 +69,11 @@
+ #define IXGBE_EIMC 0x00888
+ #define IXGBE_EIAC 0x00810
+ #define IXGBE_EIAM 0x00890
+-#define IXGBE_EITR(_i) (0x00820 + ((_i) * 4)) /* 0x820-0x86c */
+-#define IXGBE_IVAR(_i) (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */
++#define IXGBE_EITR(_i) (((_i) <= 23) ? (0x00820 + ((_i) * 4)) : (0x012300 + (((_i) - 24) * 4)))
++#define IXGBE_IVAR(_i) (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */
+ #define IXGBE_MSIXT 0x00000 /* MSI-X Table. 0x0000 - 0x01C */
+ #define IXGBE_MSIXPBA 0x02000 /* MSI-X Pending bit array */
+-#define IXGBE_PBACL 0x11068
++#define IXGBE_PBACL(_i) (((_i) == 0) ? (0x11068) : (0x110C0 + ((_i) * 4)))
+ #define IXGBE_GPIE 0x00898
+
+ /* Flow Control Registers */
+@@ -86,20 +85,33 @@
+ #define IXGBE_TFCS 0x0CE00
+
+ /* Receive DMA Registers */
+-#define IXGBE_RDBAL(_i) (0x01000 + ((_i) * 0x40)) /* 64 of each (0-63)*/
+-#define IXGBE_RDBAH(_i) (0x01004 + ((_i) * 0x40))
+-#define IXGBE_RDLEN(_i) (0x01008 + ((_i) * 0x40))
+-#define IXGBE_RDH(_i) (0x01010 + ((_i) * 0x40))
+-#define IXGBE_RDT(_i) (0x01018 + ((_i) * 0x40))
+-#define IXGBE_RXDCTL(_i) (0x01028 + ((_i) * 0x40))
+-#define IXGBE_RSCCTL(_i) (0x0102C + ((_i) * 0x40))
+-#define IXGBE_SRRCTL(_i) (0x02100 + ((_i) * 4))
+- /* array of 16 (0x02100-0x0213C) */
+-#define IXGBE_DCA_RXCTRL(_i) (0x02200 + ((_i) * 4))
+- /* array of 16 (0x02200-0x0223C) */
+-#define IXGBE_RDRXCTL 0x02F00
++#define IXGBE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : (0x0D000 + ((_i - 64) * 0x40)))
++#define IXGBE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : (0x0D004 + ((_i - 64) * 0x40)))
++#define IXGBE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : (0x0D008 + ((_i - 64) * 0x40)))
++#define IXGBE_RDH(_i) (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : (0x0D010 + ((_i - 64) * 0x40)))
++#define IXGBE_RDT(_i) (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : (0x0D018 + ((_i - 64) * 0x40)))
++#define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : (0x0D028 + ((_i - 64) * 0x40)))
++/*
++ * Split and Replication Receive Control Registers
++ * 00-15 : 0x02100 + n*4
++ * 16-63 : 0x01014 + n*0x40
++ * 64-127: 0x0D014 + (n-64)*0x40
++ */
++#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \
++ (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \
++ (0x0D014 + ((_i - 64) * 0x40))))
++/*
++ * Rx DCA Control Register:
++ * 00-15 : 0x02200 + n*4
++ * 16-63 : 0x0100C + n*0x40
++ * 64-127: 0x0D00C + (n-64)*0x40
++ */
++#define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \
++ (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \
++ (0x0D00C + ((_i - 64) * 0x40))))
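
These piecewise macros keep a single name per register across the split
address map; the index alone selects the bank. Plugging indices into the
SRRCTL definition above, for example:

	u32 a = IXGBE_SRRCTL(0);	/* 0x02100: legacy bank, n*4 stride  */
	u32 b = IXGBE_SRRCTL(16);	/* 0x01014 + 16*0x40 = 0x01414       */
	u32 c = IXGBE_SRRCTL(64);	/* 0x0D014: high bank starts at n=64 */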
++#define IXGBE_RDRXCTL 0x02F00
+ #define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4))
+- /* 8 of these 0x03C00 - 0x03C1C */
++ /* 8 of these 0x03C00 - 0x03C1C */
+ #define IXGBE_RXCTRL 0x03000
+ #define IXGBE_DROPEN 0x03D04
+ #define IXGBE_RXPBSIZE_SHIFT 10
+@@ -107,29 +119,32 @@
+ /* Receive Registers */
+ #define IXGBE_RXCSUM 0x05000
+ #define IXGBE_RFCTL 0x05008
++#define IXGBE_DRECCCTL 0x02F08
++#define IXGBE_DRECCCTL_DISABLE 0
++/* Multicast Table Array - 128 entries */
+ #define IXGBE_MTA(_i) (0x05200 + ((_i) * 4))
+- /* Multicast Table Array - 128 entries */
+-#define IXGBE_RAL(_i) (0x05400 + ((_i) * 8)) /* 16 of these (0-15) */
+-#define IXGBE_RAH(_i) (0x05404 + ((_i) * 8)) /* 16 of these (0-15) */
+-#define IXGBE_PSRTYPE 0x05480
+- /* 0x5480-0x54BC Packet split receive type */
++#define IXGBE_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : (0x0A200 + ((_i) * 8)))
++#define IXGBE_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : (0x0A204 + ((_i) * 8)))
++/* Packet split receive type */
++#define IXGBE_PSRTYPE(_i) (((_i) <= 15) ? (0x05480 + ((_i) * 4)) : (0x0EA00 + ((_i) * 4)))
++/* array of 4096 1-bit vlan filters */
+ #define IXGBE_VFTA(_i) (0x0A000 + ((_i) * 4))
+- /* array of 4096 1-bit vlan filters */
++/* array of 4096 4-bit vlan vmdq indices */
+ #define IXGBE_VFTAVIND(_j, _i) (0x0A200 + ((_j) * 0x200) + ((_i) * 4))
+- /*array of 4096 4-bit vlan vmdq indicies */
+ #define IXGBE_FCTRL 0x05080
+ #define IXGBE_VLNCTRL 0x05088
+ #define IXGBE_MCSTCTRL 0x05090
+ #define IXGBE_MRQC 0x05818
+-#define IXGBE_VMD_CTL 0x0581C
+ #define IXGBE_IMIR(_i) (0x05A80 + ((_i) * 4)) /* 8 of these (0-7) */
+ #define IXGBE_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* 8 of these (0-7) */
+ #define IXGBE_IMIRVP 0x05AC0
++#define IXGBE_VMD_CTL 0x0581C
+ #define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */
+ #define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */
+
++
+ /* Transmit DMA registers */
+-#define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40))/* 32 of these (0-31)*/
++#define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40)) /* 32 of these (0-31)*/
+ #define IXGBE_TDBAH(_i) (0x06004 + ((_i) * 0x40))
+ #define IXGBE_TDLEN(_i) (0x06008 + ((_i) * 0x40))
+ #define IXGBE_TDH(_i) (0x06010 + ((_i) * 0x40))
+@@ -138,11 +153,10 @@
+ #define IXGBE_TDWBAL(_i) (0x06038 + ((_i) * 0x40))
+ #define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40))
+ #define IXGBE_DTXCTL 0x07E00
+-#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4))
+- /* there are 16 of these (0-15) */
++
++#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */
+ #define IXGBE_TIPG 0x0CB00
+-#define IXGBE_TXPBSIZE(_i) (0x0CC00 + ((_i) *0x04))
+- /* there are 8 of these */
++#define IXGBE_TXPBSIZE(_i) (0x0CC00 + ((_i) * 4)) /* 8 of these */
+ #define IXGBE_MNGTXMAP 0x0CD10
+ #define IXGBE_TIPG_FIBER_DEFAULT 3
+ #define IXGBE_TXPBSIZE_SHIFT 10
+@@ -154,6 +168,7 @@
+ #define IXGBE_IPAV 0x05838
+ #define IXGBE_IP4AT 0x05840 /* IPv4 table 0x5840-0x5858 */
+ #define IXGBE_IP6AT 0x05880 /* IPv6 table 0x5880-0x588F */
++
+ #define IXGBE_WUPL 0x05900
+ #define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */
+ #define IXGBE_FHFT 0x09000 /* Flex host filter table 9000-93FC */
+@@ -170,6 +185,8 @@
+ #define IXGBE_TDPT2TCCR(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */
+ #define IXGBE_TDPT2TCSR(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */
+
++
++
+ /* Stats registers */
+ #define IXGBE_CRCERRS 0x04000
+ #define IXGBE_ILLERRC 0x04004
+@@ -224,7 +241,7 @@
+ #define IXGBE_XEC 0x04120
+
+ #define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4)) /* 16 of these */
+-#define IXGBE_TQSMR(_i) (0x07300 + ((_i) * 4)) /* 8 of these */
++#define IXGBE_TQSMR(_i) (((_i) <= 7) ? (0x07300 + ((_i) * 4)) : (0x08600 + ((_i) * 4)))
+
+ #define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */
+ #define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */
+@@ -275,23 +292,17 @@
+ #define IXGBE_DCA_CTRL 0x11074
+
+ /* Diagnostic Registers */
+-#define IXGBE_RDSTATCTL 0x02C20
+-#define IXGBE_RDSTAT(_i) (0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */
+-#define IXGBE_RDHMPN 0x02F08
+-#define IXGBE_RIC_DW0 0x02F10
+-#define IXGBE_RIC_DW1 0x02F14
+-#define IXGBE_RIC_DW2 0x02F18
+-#define IXGBE_RIC_DW3 0x02F1C
+-#define IXGBE_RDPROBE 0x02F20
+-#define IXGBE_TDSTATCTL 0x07C20
+-#define IXGBE_TDSTAT(_i) (0x07C00 + ((_i) * 4)) /* 0x07C00 - 0x07C1C */
+-#define IXGBE_TDHMPN 0x07F08
+-#define IXGBE_TIC_DW0 0x07F10
+-#define IXGBE_TIC_DW1 0x07F14
+-#define IXGBE_TIC_DW2 0x07F18
+-#define IXGBE_TIC_DW3 0x07F1C
+-#define IXGBE_TDPROBE 0x07F20
+-#define IXGBE_TXBUFCTRL 0x0C600
++#define IXGBE_RDSTATCTL 0x02C20
++#define IXGBE_RDSTAT(_i) (0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */
++#define IXGBE_RDHMPN 0x02F08
++#define IXGBE_RIC_DW(_i) (0x02F10 + ((_i) * 4))
++#define IXGBE_RDPROBE 0x02F20
++#define IXGBE_TDSTATCTL 0x07C20
++#define IXGBE_TDSTAT(_i) (0x07C00 + ((_i) * 4)) /* 0x07C00 - 0x07C1C */
++#define IXGBE_TDHMPN 0x07F08
++#define IXGBE_TIC_DW(_i) (0x07F10 + ((_i) * 4))
++#define IXGBE_TDPROBE 0x07F20
++#define IXGBE_TXBUFCTRL 0x0C600
+ #define IXGBE_TXBUFDATA0 0x0C610
+ #define IXGBE_TXBUFDATA1 0x0C614
+ #define IXGBE_TXBUFDATA2 0x0C618
+@@ -356,12 +367,10 @@
+ #define IXGBE_ANLP2 0x042B4
+ #define IXGBE_ATLASCTL 0x04800
+
+-/* RSCCTL Bit Masks */
+-#define IXGBE_RSCCTL_RSCEN 0x01
+-#define IXGBE_RSCCTL_MAXDESC_1 0x00
+-#define IXGBE_RSCCTL_MAXDESC_4 0x04
+-#define IXGBE_RSCCTL_MAXDESC_8 0x08
+-#define IXGBE_RSCCTL_MAXDESC_16 0x0C
++/* RDRXCTL Bit Masks */
++#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000 /* Rx Desc Min Threshold Size */
++#define IXGBE_RDRXCTL_MVMEN 0x00000020
++#define IXGBE_RDRXCTL_DMAIDONE 0x00000008 /* DMA init cycle done */
+
+ /* CTRL Bit Masks */
+ #define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Master Disable bit */
+@@ -394,7 +403,7 @@
+
+ #define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
+ #define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
+-#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* TX Desc writeback RO bit */
++#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+ #define IXGBE_DCA_MAX_QUEUES_82598 16 /* DCA regs only on 16 queues */
+
+ /* MSCA Bit Masks */
+@@ -418,10 +427,10 @@
+ #define IXGBE_MSCA_MDI_IN_PROG_EN 0x80000000 /* MDI in progress enable */
+
+ /* MSRWD bit masks */
+-#define IXGBE_MSRWD_WRITE_DATA_MASK 0x0000FFFF
+-#define IXGBE_MSRWD_WRITE_DATA_SHIFT 0
+-#define IXGBE_MSRWD_READ_DATA_MASK 0xFFFF0000
+-#define IXGBE_MSRWD_READ_DATA_SHIFT 16
++#define IXGBE_MSRWD_WRITE_DATA_MASK 0x0000FFFF
++#define IXGBE_MSRWD_WRITE_DATA_SHIFT 0
++#define IXGBE_MSRWD_READ_DATA_MASK 0xFFFF0000
++#define IXGBE_MSRWD_READ_DATA_SHIFT 16
+
+ /* Atlas registers */
+ #define IXGBE_ATLAS_PDN_LPBK 0x24
+@@ -436,6 +445,7 @@
+ #define IXGBE_ATLAS_PDN_TX_1G_QL_ALL 0xF0
+ #define IXGBE_ATLAS_PDN_TX_AN_QL_ALL 0xF0
+
++
+ /* Device Type definitions for new protocol MDIO commands */
+ #define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1
+ #define IXGBE_MDIO_PCS_DEV_TYPE 0x3
+@@ -443,6 +453,8 @@
+ #define IXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7
+ #define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Device 30 */
+
++#define IXGBE_MDIO_COMMAND_TIMEOUT 100 /* PHY Timeout for 1 GB mode */
++
+ #define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL 0x0 /* VS1 Control Reg */
+ #define IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS 0x1 /* VS1 Status Reg */
+ #define IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS 0x0008 /* 1 = Link Up */
+@@ -456,23 +468,39 @@
+ #define IXGBE_MDIO_PHY_XS_RESET 0x8000 /* PHY_XS Reset */
+ #define IXGBE_MDIO_PHY_ID_HIGH 0x2 /* PHY ID High Reg*/
+ #define IXGBE_MDIO_PHY_ID_LOW 0x3 /* PHY ID Low Reg*/
+-#define IXGBE_MDIO_PHY_SPEED_ABILITY 0x4 /* Speed Abilty Reg */
++#define IXGBE_MDIO_PHY_SPEED_ABILITY 0x4 /* Speed Ability Reg */
+ #define IXGBE_MDIO_PHY_SPEED_10G 0x0001 /* 10G capable */
+ #define IXGBE_MDIO_PHY_SPEED_1G 0x0010 /* 1G capable */
+
++#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Address Reg */
++#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */
++#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */
++
++/* MII clause 22/28 definitions */
++#define IXGBE_MDIO_PHY_LOW_POWER_MODE 0x0800
++
++#define IXGBE_MII_SPEED_SELECTION_REG 0x10
++#define IXGBE_MII_RESTART 0x200
++#define IXGBE_MII_AUTONEG_COMPLETE 0x20
++#define IXGBE_MII_AUTONEG_REG 0x0
++
+ #define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0
+ #define IXGBE_MAX_PHY_ADDR 32
+
+ /* PHY IDs*/
+-#define TN1010_PHY_ID 0x00A19410
+ #define QT2022_PHY_ID 0x0043A400
+
++/* PHY Types */
++#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0
++
+ /* General purpose Interrupt Enable */
+-#define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */
+-#define IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */
+-#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */
+-#define IXGBE_GPIE_EIAME 0x40000000
+-#define IXGBE_GPIE_PBA_SUPPORT 0x80000000
++#define IXGBE_SDP0_GPIEN 0x00000001 /* SDP0 */
++#define IXGBE_SDP1_GPIEN 0x00000002 /* SDP1 */
++#define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */
++#define IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */
++#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */
++#define IXGBE_GPIE_EIAME 0x40000000
++#define IXGBE_GPIE_PBA_SUPPORT 0x80000000
+
+ /* Transmit Flow Control status */
+ #define IXGBE_TFCS_TXOFF 0x00000001
+@@ -533,7 +561,7 @@
+ #define IXGBE_PAP_TXPAUSECNT_MASK 0x0000FFFF /* Pause counter mask */
+
+ /* RMCS Bit Masks */
+-#define IXGBE_RMCS_RRM 0x00000002 /* Receive Recylce Mode enable */
++#define IXGBE_RMCS_RRM 0x00000002 /* Receive Recycle Mode enable */
+ /* Receive Arbitration Control: 0 Round Robin, 1 DFP */
+ #define IXGBE_RMCS_RAC 0x00000004
+ #define IXGBE_RMCS_DFP IXGBE_RMCS_RAC /* Deficit Fixed Priority ena */
+@@ -541,12 +569,15 @@
+ #define IXGBE_RMCS_TFCE_PRIORITY 0x00000010 /* Tx Priority flow control ena */
+ #define IXGBE_RMCS_ARBDIS 0x00000040 /* Arbitration disable bit */
+
++
+ /* Interrupt register bitmasks */
+
+ /* Extended Interrupt Cause Read */
+ #define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */
+ #define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */
+-#define IXGBE_EICR_MNG 0x00400000 /* Managability Event Interrupt */
++#define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */
++#define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */
++#define IXGBE_EICR_GPI_SDP1 0x02000000 /* Gen Purpose Interrupt on SDP1 */
+ #define IXGBE_EICR_PBUR 0x10000000 /* Packet Buffer Handler Error */
+ #define IXGBE_EICR_DHER 0x20000000 /* Descriptor Handler Error */
+ #define IXGBE_EICR_TCP_TIMER 0x40000000 /* TCP Timer */
+@@ -554,11 +585,12 @@
+
+ /* Extended Interrupt Cause Set */
+ #define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+-#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */
+-#define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */
+-#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
+-#define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Error */
+-#define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */
++#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */
++#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
++#define IXGBE_EICS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
++#define IXGBE_EICS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
++#define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
++#define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */
+ #define IXGBE_EICS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
+ #define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
+
+@@ -566,7 +598,9 @@
+ #define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+ #define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */
+ #define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
+-#define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Error */
++#define IXGBE_EIMS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
++#define IXGBE_EIMS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
++#define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
+ #define IXGBE_EIMS_DHER IXGBE_EICR_DHER /* Descr Handler Error */
+ #define IXGBE_EIMS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
+ #define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
+@@ -575,18 +609,20 @@
+ #define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+ #define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */
+ #define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
+-#define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Error */
+-#define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Error */
++#define IXGBE_EIMC_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
++#define IXGBE_EIMC_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
++#define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
++#define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Err */
+ #define IXGBE_EIMC_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
+ #define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
+
+-#define IXGBE_EIMS_ENABLE_MASK (\
+- IXGBE_EIMS_RTX_QUEUE | \
+- IXGBE_EIMS_LSC | \
+- IXGBE_EIMS_TCP_TIMER | \
+- IXGBE_EIMS_OTHER)
++#define IXGBE_EIMS_ENABLE_MASK ( \
++ IXGBE_EIMS_RTX_QUEUE | \
++ IXGBE_EIMS_LSC | \
++ IXGBE_EIMS_TCP_TIMER | \
++ IXGBE_EIMS_OTHER)
+
+-/* Immediate Interrupt RX (A.K.A. Low Latency Interrupt) */
++/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
+ #define IXGBE_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */
+ #define IXGBE_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */
+ #define IXGBE_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */
+@@ -623,6 +659,7 @@
+ #define IXGBE_VLNCTRL_VFE 0x40000000 /* bit 30 */
+ #define IXGBE_VLNCTRL_VME 0x80000000 /* bit 31 */
+
++
+ #define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */
+
+ /* STATUS Bit Masks */
+@@ -670,16 +707,16 @@
+ #define IXGBE_AUTOC_AN_RESTART 0x00001000
+ #define IXGBE_AUTOC_FLU 0x00000001
+ #define IXGBE_AUTOC_LMS_SHIFT 13
+-#define IXGBE_AUTOC_LMS_MASK (0x7 << IXGBE_AUTOC_LMS_SHIFT)
+-#define IXGBE_AUTOC_LMS_1G_LINK_NO_AN (0x0 << IXGBE_AUTOC_LMS_SHIFT)
+-#define IXGBE_AUTOC_LMS_10G_LINK_NO_AN (0x1 << IXGBE_AUTOC_LMS_SHIFT)
+-#define IXGBE_AUTOC_LMS_1G_AN (0x2 << IXGBE_AUTOC_LMS_SHIFT)
+-#define IXGBE_AUTOC_LMS_KX4_AN (0x4 << IXGBE_AUTOC_LMS_SHIFT)
+-#define IXGBE_AUTOC_LMS_KX4_AN_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT)
+-#define IXGBE_AUTOC_LMS_ATTACH_TYPE (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
++#define IXGBE_AUTOC_LMS_MASK (0x7 << IXGBE_AUTOC_LMS_SHIFT)
++#define IXGBE_AUTOC_LMS_1G_LINK_NO_AN (0x0 << IXGBE_AUTOC_LMS_SHIFT)
++#define IXGBE_AUTOC_LMS_10G_LINK_NO_AN (0x1 << IXGBE_AUTOC_LMS_SHIFT)
++#define IXGBE_AUTOC_LMS_1G_AN (0x2 << IXGBE_AUTOC_LMS_SHIFT)
++#define IXGBE_AUTOC_LMS_KX4_AN (0x4 << IXGBE_AUTOC_LMS_SHIFT)
++#define IXGBE_AUTOC_LMS_KX4_AN_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT)
++#define IXGBE_AUTOC_LMS_ATTACH_TYPE (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+
+-#define IXGBE_AUTOC_1G_PMA_PMD 0x00000200
+-#define IXGBE_AUTOC_10G_PMA_PMD 0x00000180
++#define IXGBE_AUTOC_1G_PMA_PMD 0x00000200
++#define IXGBE_AUTOC_10G_PMA_PMD 0x00000180
+ #define IXGBE_AUTOC_10G_PMA_PMD_SHIFT 7
+ #define IXGBE_AUTOC_1G_PMA_PMD_SHIFT 9
+ #define IXGBE_AUTOC_10G_XAUI (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+@@ -705,6 +742,7 @@
+ #define IXGBE_LINKS_TL_FAULT 0x00001000
+ #define IXGBE_LINKS_SIGNAL 0x00000F00
+
++#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */
+ #define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */
+
+ /* SW Semaphore Register bitmasks */
+@@ -759,6 +797,11 @@
+ #define IXGBE_PBANUM0_PTR 0x15
+ #define IXGBE_PBANUM1_PTR 0x16
+
++/* Legacy EEPROM word offsets */
++#define IXGBE_ISCSI_BOOT_CAPS 0x0033
++#define IXGBE_ISCSI_SETUP_PORT_0 0x0030
++#define IXGBE_ISCSI_SETUP_PORT_1 0x0034
++
+ /* EEPROM Commands - SPI */
+ #define IXGBE_EEPROM_MAX_RETRY_SPI 5000 /* Max wait 5ms for RDY signal */
+ #define IXGBE_EEPROM_STATUS_RDY_SPI 0x01
+@@ -766,7 +809,7 @@
+ #define IXGBE_EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */
+ #define IXGBE_EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = addr bit-8 */
+ #define IXGBE_EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Ena latch */
+-/* EEPROM reset Write Enbale latch */
++/* EEPROM reset Write Enable latch */
+ #define IXGBE_EEPROM_WRDI_OPCODE_SPI 0x04
+ #define IXGBE_EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status reg */
+ #define IXGBE_EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status reg */
+@@ -805,26 +848,20 @@
+ /* Number of 100 microseconds we wait for PCI Express master disable */
+ #define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800
+
+-/* PHY Types */
+-#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0
+-
+ /* Check whether address is multicast. This is little-endian specific check.*/
+ #define IXGBE_IS_MULTICAST(Address) \
+- (bool)(((u8 *)(Address))[0] & ((u8)0x01))
++ (bool)(((u8 *)(Address))[0] & ((u8)0x01))
+
+ /* Check whether an address is broadcast. */
+ #define IXGBE_IS_BROADCAST(Address) \
+- ((((u8 *)(Address))[0] == ((u8)0xff)) && \
+- (((u8 *)(Address))[1] == ((u8)0xff)))
++ ((((u8 *)(Address))[0] == ((u8)0xff)) && \
++ (((u8 *)(Address))[1] == ((u8)0xff)))
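
Both helpers inspect only the leading octets of the destination address, so
they can be applied to raw frame data. For example, an IPv4 multicast MAC
sets the I/G bit (bit 0 of the first octet):

	u8 dst[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

	bool is_mc = IXGBE_IS_MULTICAST(dst);	/* true */
	bool is_bc = IXGBE_IS_BROADCAST(dst);	/* false: not ff:ff:... */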
+
+ /* RAH */
+ #define IXGBE_RAH_VIND_MASK 0x003C0000
+ #define IXGBE_RAH_VIND_SHIFT 18
+ #define IXGBE_RAH_AV 0x80000000
+-
+-/* Filters */
+-#define IXGBE_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */
+-#define IXGBE_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
++#define IXGBE_CLEAR_VMDQ_ALL 0xFFFFFFFF
+
+ /* Header split receive */
+ #define IXGBE_RFCTL_ISCSI_DIS 0x00000001
+@@ -853,7 +890,7 @@
+ #define IXGBE_MAX_FRAME_SZ 0x40040000
+
+ #define IXGBE_TDWBAL_HEAD_WB_ENABLE 0x1 /* Tx head write-back enable */
+-#define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2 /* Tx seq. # write-back enable */
++#define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2 /* Tx seq# write-back enable */
+
+ /* Receive Config masks */
+ #define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */
+@@ -866,7 +903,7 @@
+ #define IXGBE_FCTRL_BAM 0x00000400 /* Broadcast Accept Mode */
+ #define IXGBE_FCTRL_PMCF 0x00001000 /* Pass MAC Control Frames */
+ #define IXGBE_FCTRL_DPF 0x00002000 /* Discard Pause Frame */
+-/* Receive Priority Flow Control Enbale */
++/* Receive Priority Flow Control Enable */
+ #define IXGBE_FCTRL_RPFCE 0x00004000
+ #define IXGBE_FCTRL_RFCE 0x00008000 /* Receive Flow Control Ena */
+
+@@ -896,9 +933,8 @@
+ /* Receive Descriptor bit definitions */
+ #define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */
+ #define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */
+-#define IXGBE_RXD_STAT_IXSM 0x04 /* Ignore checksum */
+ #define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
+-#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum caculated */
++#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
+ #define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */
+ #define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
+ #define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */
+@@ -914,7 +950,7 @@
+ #define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */
+ #define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */
+ #define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */
+-#define IXGBE_RXDADV_HBO 0x00800000
++#define IXGBE_RXDADV_ERR_HBO 0x00800000 /* Header Buffer Overflow */
+ #define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */
+ #define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */
+ #define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */
+@@ -928,15 +964,17 @@
+ #define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */
+ #define IXGBE_RXD_CFI_SHIFT 12
+
++
+ /* SRRCTL bit definitions */
+-#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */
+-#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F
+-#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00
+-#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000
++#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */
++#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F
++#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00
++#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000
+ #define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
+ #define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000
+ #define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
+ #define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
++#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000
+
+ #define IXGBE_RXDPS_HDRSTAT_HDRSP 0x00008000
+ #define IXGBE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF
+@@ -970,21 +1008,20 @@
+ #define IXGBE_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */
+ #define IXGBE_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */
+ #define IXGBE_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */
+-
+ /* Masks to determine if packets should be dropped due to frame errors */
+-#define IXGBE_RXD_ERR_FRAME_ERR_MASK (\
+- IXGBE_RXD_ERR_CE | \
+- IXGBE_RXD_ERR_LE | \
+- IXGBE_RXD_ERR_PE | \
+- IXGBE_RXD_ERR_OSE | \
+- IXGBE_RXD_ERR_USE)
+-
+-#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK (\
+- IXGBE_RXDADV_ERR_CE | \
+- IXGBE_RXDADV_ERR_LE | \
+- IXGBE_RXDADV_ERR_PE | \
+- IXGBE_RXDADV_ERR_OSE | \
+- IXGBE_RXDADV_ERR_USE)
++#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
++ IXGBE_RXD_ERR_CE | \
++ IXGBE_RXD_ERR_LE | \
++ IXGBE_RXD_ERR_PE | \
++ IXGBE_RXD_ERR_OSE | \
++ IXGBE_RXD_ERR_USE)
++
++#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \
++ IXGBE_RXDADV_ERR_CE | \
++ IXGBE_RXDADV_ERR_LE | \
++ IXGBE_RXDADV_ERR_PE | \
++ IXGBE_RXDADV_ERR_OSE | \
++ IXGBE_RXDADV_ERR_USE)
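
The consolidated masks let the receive path make one drop decision per
descriptor instead of testing each error bit. A minimal sketch, assuming
staterr holds the descriptor's status/error dword:

	/* one test covers CRC, length, packet, oversize and undersize errors */
	if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
		/* drop: recycle the buffer and count the error */
	}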
+
+ /* Multicast bit mask */
+ #define IXGBE_MCSTCTRL_MFE 0x4
+@@ -1000,6 +1037,7 @@
+ #define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */
+ #define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT
+
++
+ /* Transmit Descriptor - Legacy */
+ struct ixgbe_legacy_tx_desc {
+ u64 buffer_addr; /* Address of the descriptor's data buffer */
+@@ -1007,15 +1045,15 @@ struct ixgbe_legacy_tx_desc {
+ __le32 data;
+ struct {
+ __le16 length; /* Data buffer length */
+- u8 cso; /* Checksum offset */
+- u8 cmd; /* Descriptor control */
++ u8 cso; /* Checksum offset */
++ u8 cmd; /* Descriptor control */
+ } flags;
+ } lower;
+ union {
+ __le32 data;
+ struct {
+- u8 status; /* Descriptor status */
+- u8 css; /* Checksum start */
++ u8 status; /* Descriptor status */
++ u8 css; /* Checksum start */
+ __le16 vlan;
+ } fields;
+ } upper;
+@@ -1024,7 +1062,7 @@ struct ixgbe_legacy_tx_desc {
+ /* Transmit Descriptor - Advanced */
+ union ixgbe_adv_tx_desc {
+ struct {
+- __le64 buffer_addr; /* Address of descriptor's data buf */
++ __le64 buffer_addr; /* Address of descriptor's data buf */
+ __le32 cmd_type_len;
+ __le32 olinfo_status;
+ } read;
+@@ -1039,9 +1077,9 @@ union ixgbe_adv_tx_desc {
+ struct ixgbe_legacy_rx_desc {
+ __le64 buffer_addr; /* Address of the descriptor's data buffer */
+ __le16 length; /* Length of data DMAed into data buffer */
+- u16 csum; /* Packet checksum */
+- u8 status; /* Descriptor status */
+- u8 errors; /* Descriptor Errors */
++ __le16 csum; /* Packet checksum */
++ u8 status; /* Descriptor status */
++ u8 errors; /* Descriptor Errors */
+ __le16 vlan;
+ };
+
+@@ -1053,15 +1091,18 @@ union ixgbe_adv_rx_desc {
+ } read;
+ struct {
+ struct {
+- struct {
+- __le16 pkt_info; /* RSS type, Packet type */
+- __le16 hdr_info; /* Split Header, header len */
++ union {
++ __le32 data;
++ struct {
++ __le16 pkt_info; /* RSS, Pkt type */
++ __le16 hdr_info; /* Splithdr, hdrlen */
++ } hs_rss;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ struct {
+ __le16 ip_id; /* IP id */
+- u16 csum; /* Packet Checksum */
++ __le16 csum; /* Packet Checksum */
+ } csum_ip;
+ } hi_dword;
+ } lower;
+@@ -1082,49 +1123,69 @@ struct ixgbe_adv_tx_context_desc {
+ };
+
+ /* Adv Transmit Descriptor Config Masks */
+-#define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buffer length(bytes) */
++#define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buf length(bytes) */
+ #define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */
+ #define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */
+ #define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
+ #define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */
+ #define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */
+-#define IXGBE_ADVTXD_DCMD_RDMA 0x04000000 /* RDMA */
+ #define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */
+-#define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */
++#define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */
+ #define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */
+ #define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */
+ #define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
+ #define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */
+-#define IXGBE_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED present in WB */
++#define IXGBE_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED pres in WB */
+ #define IXGBE_ADVTXD_STAT_RSV 0x0000000C /* STA Reserved */
+ #define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */
++#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */
+ #define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */
+ #define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \
+- IXGBE_ADVTXD_POPTS_SHIFT)
++ IXGBE_ADVTXD_POPTS_SHIFT)
+ #define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \
+- IXGBE_ADVTXD_POPTS_SHIFT)
+-#define IXGBE_ADVTXD_POPTS_EOM 0x00000400 /* Enable L bit-RDMA DDP hdr */
+-#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */
+-#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */
+-#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */
+-#define IXGBE_ADVTXD_POPTS_ISCO_FULL 0x00001800 /* 1st&Last TSO-full iSCSI PDU*/
+-#define IXGBE_ADVTXD_POPTS_RSV 0x00002000 /* POPTS Reserved */
+-#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
+-#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
+-#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */
+-#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
+-#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */
+-#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */
+-#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
+-#define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /* Req requires Markers and CRC */
+-#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
+-#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
++ IXGBE_ADVTXD_POPTS_SHIFT)
++#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */
++#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */
++#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */
++#define IXGBE_ADVTXD_POPTS_ISCO_FULL 0x00001800 /* 1st&Last TSO-full iSCSI PDU */
++#define IXGBE_ADVTXD_POPTS_RSV 0x00002000 /* POPTS Reserved */
++#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
++#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
++#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */
++#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
++#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */
++#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */
++#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
++#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */
++#define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /* Req requires Markers and CRC */
++#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
++#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
+
++/* Autonegotiation advertised speeds */
++typedef u32 ixgbe_autoneg_advertised;
+ /* Link speed */
++typedef u32 ixgbe_link_speed;
+ #define IXGBE_LINK_SPEED_UNKNOWN 0
+ #define IXGBE_LINK_SPEED_100_FULL 0x0008
+ #define IXGBE_LINK_SPEED_1GB_FULL 0x0020
+ #define IXGBE_LINK_SPEED_10GB_FULL 0x0080
++#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \
++ IXGBE_LINK_SPEED_10GB_FULL)
++
++/* Physical layer type */
++typedef u32 ixgbe_physical_layer;
++#define IXGBE_PHYSICAL_LAYER_UNKNOWN 0
++#define IXGBE_PHYSICAL_LAYER_10GBASE_T 0x0001
++#define IXGBE_PHYSICAL_LAYER_1000BASE_T 0x0002
++#define IXGBE_PHYSICAL_LAYER_100BASE_T 0x0004
++#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x0008
++#define IXGBE_PHYSICAL_LAYER_10GBASE_LR 0x0010
++#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x0020
++#define IXGBE_PHYSICAL_LAYER_10GBASE_SR 0x0040
++#define IXGBE_PHYSICAL_LAYER_10GBASE_KX4 0x0080
++#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4 0x0100
++#define IXGBE_PHYSICAL_LAYER_1000BASE_KX 0x0200
++#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x0400
+
+
+ enum ixgbe_eeprom_type {
+@@ -1141,16 +1202,38 @@ enum ixgbe_mac_type {
+
+ enum ixgbe_phy_type {
+ ixgbe_phy_unknown = 0,
+- ixgbe_phy_tn,
+ ixgbe_phy_qt,
+- ixgbe_phy_xaui
++ ixgbe_phy_xaui,
++ ixgbe_phy_tw_tyco,
++ ixgbe_phy_tw_unknown,
++ ixgbe_phy_sfp_avago,
++ ixgbe_phy_sfp_ftl,
++ ixgbe_phy_sfp_unknown,
++ ixgbe_phy_generic
++};
++
++/*
++ * SFP+ module type IDs:
++ *
++ * ID Module Type
++ * =============
++ * 0 SFP_DA_CU
++ * 1 SFP_SR
++ * 2 SFP_LR
++ */
++enum ixgbe_sfp_type {
++ ixgbe_sfp_type_da_cu = 0,
++ ixgbe_sfp_type_sr = 1,
++ ixgbe_sfp_type_lr = 2,
++ ixgbe_sfp_type_unknown = 0xFFFF
+ };
+
+ enum ixgbe_media_type {
+ ixgbe_media_type_unknown = 0,
+ ixgbe_media_type_fiber,
+ ixgbe_media_type_copper,
+- ixgbe_media_type_backplane
++ ixgbe_media_type_backplane,
++ ixgbe_media_type_virtual
+ };
+
+ /* Flow Control Settings */
+@@ -1167,6 +1250,8 @@ struct ixgbe_addr_filter_info {
+ u32 rar_used_count;
+ u32 mc_addr_in_rar_count;
+ u32 mta_in_use;
++ u32 overflow_promisc;
++ bool user_set_promisc;
+ };
+
+ /* Flow control parameters */
+@@ -1242,57 +1327,118 @@ struct ixgbe_hw_stats {
+ /* forward declaration */
+ struct ixgbe_hw;
+
++/* iterator type for walking multicast address lists */
++typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr,
++ u32 *vmdq);
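
The iterator hides how a caller stores its address list; update_mc_addr_list
simply invokes it once per entry. A sketch of an iterator over a flat array
of 6-byte addresses (the function name is illustrative):

	static u8 *example_mc_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr,
	                          u32 *vmdq)
	{
		u8 *addr = *mc_addr_ptr;

		*vmdq = 0;	/* no VMDq pool selection in this sketch */
		*mc_addr_ptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
		return addr;
	}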
++
++/* Function pointer table */
++struct ixgbe_eeprom_operations {
++ s32 (*init_params)(struct ixgbe_hw *);
++ s32 (*read)(struct ixgbe_hw *, u16, u16 *);
++ s32 (*write)(struct ixgbe_hw *, u16, u16);
++ s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
++ s32 (*update_checksum)(struct ixgbe_hw *);
++};
++
+ struct ixgbe_mac_operations {
+- s32 (*reset)(struct ixgbe_hw *);
++ s32 (*init_hw)(struct ixgbe_hw *);
++ s32 (*reset_hw)(struct ixgbe_hw *);
++ s32 (*start_hw)(struct ixgbe_hw *);
++ s32 (*clear_hw_cntrs)(struct ixgbe_hw *);
+ enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
++ s32 (*get_supported_physical_layer)(struct ixgbe_hw *);
++ s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
++ s32 (*stop_adapter)(struct ixgbe_hw *);
++ s32 (*get_bus_info)(struct ixgbe_hw *);
++ s32 (*read_analog_reg8)(struct ixgbe_hw*, u32, u8*);
++ s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8);
++
++ /* Link */
+ s32 (*setup_link)(struct ixgbe_hw *);
+- s32 (*check_link)(struct ixgbe_hw *, u32 *, bool *);
+- s32 (*setup_link_speed)(struct ixgbe_hw *, u32, bool, bool);
+- s32 (*get_link_settings)(struct ixgbe_hw *, u32 *, bool *);
++ s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool,
++ bool);
++ s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
++ s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
++ bool *);
++
++ /* LED */
++ s32 (*led_on)(struct ixgbe_hw *, u32);
++ s32 (*led_off)(struct ixgbe_hw *, u32);
++ s32 (*blink_led_start)(struct ixgbe_hw *, u32);
++ s32 (*blink_led_stop)(struct ixgbe_hw *, u32);
++
++ /* RAR, Multicast, VLAN */
++ s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32);
++ s32 (*clear_rar)(struct ixgbe_hw *, u32);
++ s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32);
++ s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
++ s32 (*init_rx_addrs)(struct ixgbe_hw *);
++ s32 (*update_uc_addr_list)(struct ixgbe_hw *, u8 *, u32,
++ ixgbe_mc_addr_itr);
++ s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32,
++ ixgbe_mc_addr_itr);
++ s32 (*enable_mc)(struct ixgbe_hw *);
++ s32 (*disable_mc)(struct ixgbe_hw *);
++ s32 (*clear_vfta)(struct ixgbe_hw *);
++ s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
++ s32 (*init_uta_tables)(struct ixgbe_hw *);
++
++ /* Flow Control */
++ s32 (*setup_fc)(struct ixgbe_hw *, s32);
+ };
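
Every MAC operation is reached through this table, so generic and
82598-specific implementations can be mixed per device at probe time. A
hedged sketch of a caller:

	ixgbe_link_speed speed;
	bool link_up;

	hw->mac.ops.check_link(hw, &speed, &link_up, false);
	if (link_up && speed == IXGBE_LINK_SPEED_10GB_FULL) {
		/* link is up at 10G; program the data path accordingly */
	}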
+
+ struct ixgbe_phy_operations {
++ s32 (*identify)(struct ixgbe_hw *);
++ s32 (*identify_sfp)(struct ixgbe_hw *);
++ s32 (*reset)(struct ixgbe_hw *);
++ s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *);
++ s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16);
+ s32 (*setup_link)(struct ixgbe_hw *);
+- s32 (*check_link)(struct ixgbe_hw *, u32 *, bool *);
+- s32 (*setup_link_speed)(struct ixgbe_hw *, u32, bool, bool);
+-};
+-
+-struct ixgbe_mac_info {
+- struct ixgbe_mac_operations ops;
+- enum ixgbe_mac_type type;
+- u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
+- u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
+- s32 mc_filter_type;
+- u32 num_rx_queues;
+- u32 num_tx_queues;
+- u32 num_rx_addrs;
+- u32 link_attach_type;
+- u32 link_mode_select;
+- bool link_settings_loaded;
++ s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool,
++ bool);
++ s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *);
++ s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8);
++ s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8, u8 *);
++ s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
+ };
+
+ struct ixgbe_eeprom_info {
+- enum ixgbe_eeprom_type type;
+- u16 word_size;
+- u16 address_bits;
++ struct ixgbe_eeprom_operations ops;
++ enum ixgbe_eeprom_type type;
++ u32 semaphore_delay;
++ u16 word_size;
++ u16 address_bits;
+ };
+
+-struct ixgbe_phy_info {
+- struct ixgbe_phy_operations ops;
+-
+- enum ixgbe_phy_type type;
+- u32 addr;
+- u32 id;
+- u32 revision;
+- enum ixgbe_media_type media_type;
+- u32 autoneg_advertised;
+- bool autoneg_wait_to_complete;
++struct ixgbe_mac_info {
++ struct ixgbe_mac_operations ops;
++ enum ixgbe_mac_type type;
++ u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
++ u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
++ s32 mc_filter_type;
++ u32 mcft_size;
++ u32 vft_size;
++ u32 num_rar_entries;
++ u32 max_tx_queues;
++ u32 max_rx_queues;
++ u32 link_attach_type;
++ u32 link_mode_select;
++ bool link_settings_loaded;
++ bool autoneg;
++ bool autoneg_failed;
+ };
+
+-struct ixgbe_info {
+- enum ixgbe_mac_type mac;
+- s32 (*get_invariants)(struct ixgbe_hw *);
+- struct ixgbe_mac_operations *mac_ops;
++struct ixgbe_phy_info {
++ struct ixgbe_phy_operations ops;
++ enum ixgbe_phy_type type;
++ u32 addr;
++ u32 id;
++ enum ixgbe_sfp_type sfp_type;
++ u32 revision;
++ enum ixgbe_media_type media_type;
++ bool reset_disable;
++ ixgbe_autoneg_advertised autoneg_advertised;
++ bool autoneg_wait_to_complete;
+ };
+
+ struct ixgbe_hw {
+@@ -1311,6 +1457,15 @@ struct ixgbe_hw {
+ bool adapter_stopped;
+ };
+
++struct ixgbe_info {
++ enum ixgbe_mac_type mac;
++ s32 (*get_invariants)(struct ixgbe_hw *);
++ struct ixgbe_mac_operations *mac_ops;
++ struct ixgbe_eeprom_operations *eeprom_ops;
++ struct ixgbe_phy_operations *phy_ops;
++};
++
++
+ /* Error Codes */
+ #define IXGBE_ERR_EEPROM -1
+ #define IXGBE_ERR_EEPROM_CHECKSUM -2
+@@ -1329,6 +1484,8 @@ struct ixgbe_hw {
+ #define IXGBE_ERR_RESET_FAILED -15
+ #define IXGBE_ERR_SWFW_SYNC -16
+ #define IXGBE_ERR_PHY_ADDR_INVALID -17
++#define IXGBE_ERR_I2C -18
++#define IXGBE_ERR_SFP_NOT_SUPPORTED -19
+ #define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
+
+ #endif /* _IXGBE_TYPE_H_ */
+--- a/drivers/net/Kconfig
++++ b/drivers/net/Kconfig
+@@ -2381,7 +2381,6 @@ config EHEA
+ config IXGBE
+ tristate "Intel(R) 10GbE PCI Express adapters support"
+ depends on PCI && INET
+- select INET_LRO
+ ---help---
+ This driver supports Intel(R) 10GbE PCI Express family of
+ adapters. For more information on how to identify your adapter, go
+@@ -2397,6 +2396,16 @@ config IXGBE
+ To compile this driver as a module, choose M here. The module
+ will be called ixgbe.
+
++config IXGBE_LRO
++ bool "Use software LRO"
++ depends on IXGBE && INET
++ select INET_LRO
++ default y
++ ---help---
++ Say Y here if you want to use large receive offload.
++
++ If in doubt, say Y.
++
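The new option only gates the software LRO code paths; a typical
configuration enabling it looks like this .config fragment (illustrative):

	CONFIG_IXGBE=m
	CONFIG_IXGBE_LRO=y
	CONFIG_INET_LRO=y	# pulled in automatically via 'select'
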
+ config IXGB
+ tristate "Intel(R) PRO/10GbE support"
+ depends on PCI