1 From 401b9a9a678be30083ff7f68fb1ab3fb898d50b4 Mon Sep 17 00:00:00 2001
2 From: Hannes Reinecke <hare@suse.de>
3 Date: Wed, 17 Sep 2008 16:35:05 +0200
4 Subject: [PATCH] ixgbe: Bugfixes for FCoE
5
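This pulls in the ixgbe hardware-access rework needed for FCoE: silicon-specific code moves behind per-MAC function-pointer tables (mac_ops_82598, eeprom_ops_82598, phy_ops_82598), the 82598 code gains flow control, VMDq, VLAN filter table, LED blink and Atlas analog register helpers, and the common code gains bit-bang EEPROM access plus generic init/start/stop routines.
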
6 Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
7 Signed-off-by: Hannes Reinecke <hare@suse.de>
8 ---
9 drivers/net/Kconfig | 11
10 drivers/net/ixgbe/ixgbe.h | 113 +-
11 drivers/net/ixgbe/ixgbe_82598.c | 628 +++++++++---
12 drivers/net/ixgbe/ixgbe_common.c | 1064 ++++++++++++++------
13 drivers/net/ixgbe/ixgbe_common.h | 60 -
14 drivers/net/ixgbe/ixgbe_ethtool.c | 307 +++--
15 drivers/net/ixgbe/ixgbe_main.c | 1981 +++++++++++++++++++++-----------------
16 drivers/net/ixgbe/ixgbe_phy.c | 248 +---
17 drivers/net/ixgbe/ixgbe_phy.h | 63 -
18 drivers/net/ixgbe/ixgbe_type.h | 559 ++++++----
19 10 files changed, 3159 insertions(+), 1875 deletions(-)
20
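Note on the diff below: the bulk of this patch converts direct calls to ixgbe_* helpers into calls through per-silicon function-pointer tables (see mac_ops_82598, eeprom_ops_82598 and phy_ops_82598 in the 82598 diff). The following is a minimal, self-contained sketch of that indirection pattern; every name in it is illustrative only, the driver's real layouts live in ixgbe_type.h:

struct example_hw;

/* Per-silicon operations, bound once at probe time. */
struct example_mac_ops {
        int (*reset_hw)(struct example_hw *hw);
        int (*setup_link)(struct example_hw *hw);
};

struct example_hw {
        const struct example_mac_ops *ops;
};

/*
 * Shared code never names a silicon-specific helper directly; it
 * calls through the table that probe code bound (e.g. a static
 * example_ops_82598 table).  This mirrors how the patch replaces
 * direct ixgbe_stop_adapter(hw)-style calls with
 * hw->mac.ops.stop_adapter(hw).
 */
static int example_init_hw(struct example_hw *hw)
{
        int err;

        err = hw->ops->reset_hw(hw);    /* silicon-specific reset */
        if (err)
                return err;

        return hw->ops->setup_link(hw); /* silicon-specific link setup */
}

This is why the same call sites can dispatch either to 82598-specific helpers or to the new *_generic common routines.
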
21 --- a/drivers/net/ixgbe/ixgbe_82598.c
22 +++ b/drivers/net/ixgbe/ixgbe_82598.c
23 @@ -1,7 +1,7 @@
24 /*******************************************************************************
25
26 Intel 10 Gigabit PCI Express Linux driver
27 - Copyright(c) 1999 - 2007 Intel Corporation.
28 + Copyright(c) 1999 - 2008 Intel Corporation.
29
30 This program is free software; you can redistribute it and/or modify it
31 under the terms and conditions of the GNU General Public License,
32 @@ -20,7 +20,6 @@
33 the file called "COPYING".
34
35 Contact Information:
36 - Linux NICS <linux.nics@intel.com>
37 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
38 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
39
40 @@ -36,67 +35,62 @@
41 #define IXGBE_82598_MAX_TX_QUEUES 32
42 #define IXGBE_82598_MAX_RX_QUEUES 64
43 #define IXGBE_82598_RAR_ENTRIES 16
44 +#define IXGBE_82598_MC_TBL_SIZE 128
45 +#define IXGBE_82598_VFT_TBL_SIZE 128
46
47 -static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw);
48 -static s32 ixgbe_get_link_settings_82598(struct ixgbe_hw *hw, u32 *speed,
49 - bool *autoneg);
50 -static s32 ixgbe_get_copper_link_settings_82598(struct ixgbe_hw *hw,
51 - u32 *speed, bool *autoneg);
52 -static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
53 -static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw);
54 -static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, u32 *speed,
55 - bool *link_up);
56 -static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw, u32 speed,
57 - bool autoneg,
58 - bool autoneg_wait_to_complete);
59 +static s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
60 + ixgbe_link_speed *speed,
61 + bool *autoneg);
62 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw);
63 -static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw, u32 speed,
64 - bool autoneg,
65 - bool autoneg_wait_to_complete);
66 -static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
67 -
68 +static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw,
69 + ixgbe_link_speed speed,
70 + bool autoneg,
71 + bool autoneg_wait_to_complete);
72
73 +/**
74 + */
75 static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
76 {
77 - hw->mac.num_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
78 - hw->mac.num_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
79 - hw->mac.num_rx_addrs = IXGBE_82598_RAR_ENTRIES;
80 -
81 - /* PHY ops are filled in by default properly for Fiber only */
82 - if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
83 - hw->mac.ops.setup_link = &ixgbe_setup_copper_link_82598;
84 - hw->mac.ops.setup_link_speed = &ixgbe_setup_copper_link_speed_82598;
85 - hw->mac.ops.get_link_settings =
86 - &ixgbe_get_copper_link_settings_82598;
87 -
88 - /* Call PHY identify routine to get the phy type */
89 - ixgbe_identify_phy(hw);
90 -
91 - switch (hw->phy.type) {
92 - case ixgbe_phy_tn:
93 - hw->phy.ops.setup_link = &ixgbe_setup_tnx_phy_link;
94 - hw->phy.ops.check_link = &ixgbe_check_tnx_phy_link;
95 - hw->phy.ops.setup_link_speed =
96 - &ixgbe_setup_tnx_phy_link_speed;
97 - break;
98 - default:
99 - break;
100 - }
101 + struct ixgbe_mac_info *mac = &hw->mac;
102 + struct ixgbe_phy_info *phy = &hw->phy;
103 +
104 + /* Call PHY identify routine to get the phy type */
105 + ixgbe_identify_phy_generic(hw);
106 +
107 + /* PHY Init */
108 + switch (phy->type) {
109 + default:
110 + break;
111 }
112
113 + if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
114 + mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
115 + mac->ops.setup_link_speed =
116 + &ixgbe_setup_copper_link_speed_82598;
117 + mac->ops.get_link_capabilities =
118 + &ixgbe_get_copper_link_capabilities_82598;
119 + }
120 +
121 + mac->mcft_size = IXGBE_82598_MC_TBL_SIZE;
122 + mac->vft_size = IXGBE_82598_VFT_TBL_SIZE;
123 + mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES;
124 + mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
125 + mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
126 +
127 return 0;
128 }
129
130 /**
131 - * ixgbe_get_link_settings_82598 - Determines default link settings
132 + * ixgbe_get_link_capabilities_82598 - Determines link capabilities
133 * @hw: pointer to hardware structure
134 * @speed: pointer to link speed
135 * @autoneg: boolean auto-negotiation value
136 *
137 - * Determines the default link settings by reading the AUTOC register.
138 + * Determines the link capabilities by reading the AUTOC register.
139 **/
140 -static s32 ixgbe_get_link_settings_82598(struct ixgbe_hw *hw, u32 *speed,
141 - bool *autoneg)
142 +static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
143 + ixgbe_link_speed *speed,
144 + bool *autoneg)
145 {
146 s32 status = 0;
147 s32 autoc_reg;
148 @@ -145,15 +139,16 @@ static s32 ixgbe_get_link_settings_82598
149 }
150
151 /**
152 - * ixgbe_get_copper_link_settings_82598 - Determines default link settings
153 + * ixgbe_get_copper_link_capabilities_82598 - Determines link capabilities
154 * @hw: pointer to hardware structure
155 * @speed: pointer to link speed
156 * @autoneg: boolean auto-negotiation value
157 *
158 - * Determines the default link settings by reading the AUTOC register.
159 + * Determines the link capabilities by reading the AUTOC register.
160 **/
161 -static s32 ixgbe_get_copper_link_settings_82598(struct ixgbe_hw *hw,
162 - u32 *speed, bool *autoneg)
163 +s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
164 + ixgbe_link_speed *speed,
165 + bool *autoneg)
166 {
167 s32 status = IXGBE_ERR_LINK_SETUP;
168 u16 speed_ability;
169 @@ -161,9 +156,9 @@ static s32 ixgbe_get_copper_link_setting
170 *speed = 0;
171 *autoneg = true;
172
173 - status = ixgbe_read_phy_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY,
174 - IXGBE_MDIO_PMA_PMD_DEV_TYPE,
175 - &speed_ability);
176 + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY,
177 + IXGBE_MDIO_PMA_PMD_DEV_TYPE,
178 + &speed_ability);
179
180 if (status == 0) {
181 if (speed_ability & IXGBE_MDIO_PHY_SPEED_10G)
182 @@ -191,11 +186,9 @@ static enum ixgbe_media_type ixgbe_get_m
183 case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
184 case IXGBE_DEV_ID_82598EB_CX4:
185 case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
186 + case IXGBE_DEV_ID_82598EB_XF_LR:
187 media_type = ixgbe_media_type_fiber;
188 break;
189 - case IXGBE_DEV_ID_82598AT_DUAL_PORT:
190 - media_type = ixgbe_media_type_copper;
191 - break;
192 default:
193 media_type = ixgbe_media_type_unknown;
194 break;
195 @@ -205,6 +198,122 @@ static enum ixgbe_media_type ixgbe_get_m
196 }
197
198 /**
199 + * ixgbe_setup_fc_82598 - Configure flow control settings
200 + * @hw: pointer to hardware structure
201 + * @packetbuf_num: packet buffer number (0-7)
202 + *
203 + * Configures the flow control settings based on SW configuration. This
204 + * function is used for 802.3x flow control configuration only.
205 + **/
206 +s32 ixgbe_setup_fc_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
207 +{
208 + u32 frctl_reg;
209 + u32 rmcs_reg;
210 +
211 + if (packetbuf_num < 0 || packetbuf_num > 7) {
212 + hw_dbg(hw, "Invalid packet buffer number [%d], expected range is"
213 + " 0-7\n", packetbuf_num);
214 + }
215 +
216 + frctl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
217 + frctl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
218 +
219 + rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
220 + rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);
221 +
222 + /*
223 + * 10 gig parts do not have a word in the EEPROM to determine the
224 + * default flow control setting, so we explicitly set it to full.
225 + */
226 + if (hw->fc.type == ixgbe_fc_default)
227 + hw->fc.type = ixgbe_fc_full;
228 +
229 + /*
230 + * We want to save off the original Flow Control configuration just in
231 + * case we get disconnected and then reconnected into a different hub
232 + * or switch with different Flow Control capabilities.
233 + */
234 + hw->fc.original_type = hw->fc.type;
235 +
236 + /*
237 + * The possible values of the "flow_control" parameter are:
238 + * 0: Flow control is completely disabled
239 + * 1: Rx flow control is enabled (we can receive pause frames but not
240 + * send pause frames).
241 + * 2: Tx flow control is enabled (we can send pause frames but we do not
242 + * support receiving pause frames)
243 + * 3: Both Rx and Tx flow control (symmetric) are enabled.
244 + * other: Invalid.
245 + */
246 + switch (hw->fc.type) {
247 + case ixgbe_fc_none:
248 + break;
249 + case ixgbe_fc_rx_pause:
250 + /*
251 + * Rx Flow control is enabled,
252 + * and Tx Flow control is disabled.
253 + */
254 + frctl_reg |= IXGBE_FCTRL_RFCE;
255 + break;
256 + case ixgbe_fc_tx_pause:
257 + /*
258 + * Tx Flow control is enabled, and Rx Flow control is disabled,
259 + * by a software over-ride.
260 + */
261 + rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
262 + break;
263 + case ixgbe_fc_full:
264 + /*
265 + * Flow control (both Rx and Tx) is enabled by a software
266 + * over-ride.
267 + */
268 + frctl_reg |= IXGBE_FCTRL_RFCE;
269 + rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
270 + break;
271 + default:
272 + /* We should never get here. The value should be 0-3. */
273 + hw_dbg(hw, "Flow control param set incorrectly\n");
274 + break;
275 + }
276 +
277 + /* Enable 802.3x based flow control settings. */
278 + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, frctl_reg);
279 + IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
280 +
281 + /*
282 + * Check for invalid software configuration, zeros are completely
283 + * invalid for all parameters used past this point, and if we enable
284 + * flow control with zero water marks, we blast flow control packets.
285 + */
286 + if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) {
287 + hw_dbg(hw, "Flow control structure initialized incorrectly\n");
288 + return IXGBE_ERR_INVALID_LINK_SETTINGS;
289 + }
290 +
291 + /*
292 + * We need to set up the Receive Threshold high and low water
293 + * marks as well as (optionally) enabling the transmission of
294 + * XON frames.
295 + */
296 + if (hw->fc.type & ixgbe_fc_tx_pause) {
297 + if (hw->fc.send_xon) {
298 + IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
299 + (hw->fc.low_water | IXGBE_FCRTL_XONE));
300 + } else {
301 + IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
302 + hw->fc.low_water);
303 + }
304 + IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num),
305 + (hw->fc.high_water)|IXGBE_FCRTH_FCEN);
306 + }
307 +
308 + IXGBE_WRITE_REG(hw, IXGBE_FCTTV(0), hw->fc.pause_time);
309 + IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));
310 +
311 + return 0;
312 +}
313 +
314 +/**
315 * ixgbe_setup_mac_link_82598 - Configures MAC link settings
316 * @hw: pointer to hardware structure
317 *
318 @@ -248,8 +357,7 @@ static s32 ixgbe_setup_mac_link_82598(st
319 }
320 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
321 status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
322 - hw_dbg(hw,
323 - "Autonegotiation did not complete.\n");
324 + hw_dbg(hw, "Autonegotiation did not complete.\n");
325 }
326 }
327 }
328 @@ -259,8 +367,8 @@ static s32 ixgbe_setup_mac_link_82598(st
329 * case we get disconnected and then reconnected into a different hub
330 * or switch with different Flow Control capabilities.
331 */
332 - hw->fc.type = hw->fc.original_type;
333 - ixgbe_setup_fc(hw, 0);
334 + hw->fc.original_type = hw->fc.type;
335 + ixgbe_setup_fc_82598(hw, 0);
336
337 /* Add delay to filter out noises during initial link setup */
338 msleep(50);
339 @@ -273,20 +381,35 @@ static s32 ixgbe_setup_mac_link_82598(st
340 * @hw: pointer to hardware structure
341 * @speed: pointer to link speed
342 * @link_up: true is link is up, false otherwise
343 + * @link_up_wait_to_complete: bool used to wait for link up or not
344 *
345 * Reads the links register to determine if link is up and the current speed
346 **/
347 -static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, u32 *speed,
348 - bool *link_up)
349 +static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
350 + ixgbe_link_speed *speed, bool *link_up,
351 + bool link_up_wait_to_complete)
352 {
353 u32 links_reg;
354 + u32 i;
355
356 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
357 -
358 - if (links_reg & IXGBE_LINKS_UP)
359 - *link_up = true;
360 - else
361 - *link_up = false;
362 + if (link_up_wait_to_complete) {
363 + for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
364 + if (links_reg & IXGBE_LINKS_UP) {
365 + *link_up = true;
366 + break;
367 + } else {
368 + *link_up = false;
369 + }
370 + msleep(100);
371 + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
372 + }
373 + } else {
374 + if (links_reg & IXGBE_LINKS_UP)
375 + *link_up = true;
376 + else
377 + *link_up = false;
378 + }
379
380 if (links_reg & IXGBE_LINKS_SPEED)
381 *speed = IXGBE_LINK_SPEED_10GB_FULL;
382 @@ -296,6 +419,7 @@ static s32 ixgbe_check_mac_link_82598(st
383 return 0;
384 }
385
386 +
387 /**
388 * ixgbe_setup_mac_link_speed_82598 - Set MAC link speed
389 * @hw: pointer to hardware structure
390 @@ -306,18 +430,18 @@ static s32 ixgbe_check_mac_link_82598(st
391 * Set the link speed in the AUTOC register and restarts link.
392 **/
393 static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw,
394 - u32 speed, bool autoneg,
395 - bool autoneg_wait_to_complete)
396 + ixgbe_link_speed speed, bool autoneg,
397 + bool autoneg_wait_to_complete)
398 {
399 s32 status = 0;
400
401 /* If speed is 10G, then check for CX4 or XAUI. */
402 if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
403 - (!(hw->mac.link_attach_type & IXGBE_AUTOC_10G_KX4)))
404 + (!(hw->mac.link_attach_type & IXGBE_AUTOC_10G_KX4))) {
405 hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN;
406 - else if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && (!autoneg))
407 + } else if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && (!autoneg)) {
408 hw->mac.link_mode_select = IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
409 - else if (autoneg) {
410 + } else if (autoneg) {
411 /* BX mode - Autonegotiate 1G */
412 if (!(hw->mac.link_attach_type & IXGBE_AUTOC_1G_PMA_PMD))
413 hw->mac.link_mode_select = IXGBE_AUTOC_LMS_1G_AN;
414 @@ -336,7 +460,7 @@ static s32 ixgbe_setup_mac_link_speed_82
415 * ixgbe_hw This will write the AUTOC register based on the new
416 * stored values
417 */
418 - hw->mac.ops.setup_link(hw);
419 + ixgbe_setup_mac_link_82598(hw);
420 }
421
422 return status;
423 @@ -354,18 +478,17 @@ static s32 ixgbe_setup_mac_link_speed_82
424 **/
425 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw)
426 {
427 - s32 status = 0;
428 + s32 status;
429
430 /* Restart autonegotiation on PHY */
431 - if (hw->phy.ops.setup_link)
432 - status = hw->phy.ops.setup_link(hw);
433 + status = hw->phy.ops.setup_link(hw);
434
435 - /* Set MAC to KX/KX4 autoneg, which defaultis to Parallel detection */
436 + /* Set MAC to KX/KX4 autoneg, which defaults to Parallel detection */
437 hw->mac.link_attach_type = (IXGBE_AUTOC_10G_KX4 | IXGBE_AUTOC_1G_KX);
438 hw->mac.link_mode_select = IXGBE_AUTOC_LMS_KX4_AN;
439
440 /* Set up MAC */
441 - hw->mac.ops.setup_link(hw);
442 + ixgbe_setup_mac_link_82598(hw);
443
444 return status;
445 }
446 @@ -379,23 +502,23 @@ static s32 ixgbe_setup_copper_link_82598
447 *
448 * Sets the link speed in the AUTOC register in the MAC and restarts link.
449 **/
450 -static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw, u32 speed,
451 - bool autoneg,
452 - bool autoneg_wait_to_complete)
453 +static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw,
454 + ixgbe_link_speed speed,
455 + bool autoneg,
456 + bool autoneg_wait_to_complete)
457 {
458 - s32 status = 0;
459 + s32 status;
460
461 /* Setup the PHY according to input speed */
462 - if (hw->phy.ops.setup_link_speed)
463 - status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
464 - autoneg_wait_to_complete);
465 + status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
466 + autoneg_wait_to_complete);
467
468 /* Set MAC to KX/KX4 autoneg, which defaults to Parallel detection */
469 hw->mac.link_attach_type = (IXGBE_AUTOC_10G_KX4 | IXGBE_AUTOC_1G_KX);
470 hw->mac.link_mode_select = IXGBE_AUTOC_LMS_KX4_AN;
471
472 /* Set up MAC */
473 - hw->mac.ops.setup_link(hw);
474 + ixgbe_setup_mac_link_82598(hw);
475
476 return status;
477 }
478 @@ -404,7 +527,7 @@ static s32 ixgbe_setup_copper_link_speed
479 * ixgbe_reset_hw_82598 - Performs hardware reset
480 * @hw: pointer to hardware structure
481 *
482 - * Resets the hardware by reseting the transmit and receive units, masks and
483 + * Resets the hardware by resetting the transmit and receive units, masks and
484 * clears all interrupts, performing a PHY reset, and performing a link (MAC)
485 * reset.
486 **/
487 @@ -418,35 +541,44 @@ static s32 ixgbe_reset_hw_82598(struct i
488 u8 analog_val;
489
490 /* Call adapter stop to disable tx/rx and clear interrupts */
491 - ixgbe_stop_adapter(hw);
492 + hw->mac.ops.stop_adapter(hw);
493
494 /*
495 - * Power up the Atlas TX lanes if they are currently powered down.
496 - * Atlas TX lanes are powered down for MAC loopback tests, but
497 + * Power up the Atlas Tx lanes if they are currently powered down.
498 + * Atlas Tx lanes are powered down for MAC loopback tests, but
499 * they are not automatically restored on reset.
500 */
501 - ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
502 + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
503 if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
504 - /* Enable TX Atlas so packets can be transmitted again */
505 - ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
506 + /* Enable Tx Atlas so packets can be transmitted again */
507 + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
508 + &analog_val);
509 analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
510 - ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, analog_val);
511 + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
512 + analog_val);
513
514 - ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &analog_val);
515 + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
516 + &analog_val);
517 analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
518 - ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, analog_val);
519 + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
520 + analog_val);
521
522 - ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &analog_val);
523 + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
524 + &analog_val);
525 analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
526 - ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, analog_val);
527 + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
528 + analog_val);
529
530 - ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &analog_val);
531 + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
532 + &analog_val);
533 analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
534 - ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, analog_val);
535 + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
536 + analog_val);
537 }
538
539 /* Reset PHY */
540 - ixgbe_reset_phy(hw);
541 + if (hw->phy.reset_disable == false)
542 + hw->phy.ops.reset(hw);
543
544 /*
545 * Prevent the PCI-E bus from from hanging by disabling PCI-E master
546 @@ -499,29 +631,311 @@ static s32 ixgbe_reset_hw_82598(struct i
547 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
548 } else {
549 hw->mac.link_attach_type =
550 - (autoc & IXGBE_AUTOC_LMS_ATTACH_TYPE);
551 + (autoc & IXGBE_AUTOC_LMS_ATTACH_TYPE);
552 hw->mac.link_mode_select = (autoc & IXGBE_AUTOC_LMS_MASK);
553 hw->mac.link_settings_loaded = true;
554 }
555
556 /* Store the permanent mac address */
557 - ixgbe_get_mac_addr(hw, hw->mac.perm_addr);
558 + hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
559
560 return status;
561 }
562
563 +/**
564 + * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
565 + * @hw: pointer to hardware struct
566 + * @rar: receive address register index to associate with a VMDq index
567 + * @vmdq: VMDq set index
568 + **/
569 +s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
570 +{
571 + u32 rar_high;
572 +
573 + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
574 + rar_high &= ~IXGBE_RAH_VIND_MASK;
575 + rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
576 + IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
577 + return 0;
578 +}
579 +
580 +/**
581 + * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
582 + * @hw: pointer to hardware struct
583 + * @rar: receive address register index to associate with a VMDq index
584 + * @vmdq: VMDq clear index (not used in 82598, but elsewhere)
585 + **/
586 +static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
587 +{
588 + u32 rar_high;
589 + u32 rar_entries = hw->mac.num_rar_entries;
590 +
591 + if (rar < rar_entries) {
592 + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
593 + if (rar_high & IXGBE_RAH_VIND_MASK) {
594 + rar_high &= ~IXGBE_RAH_VIND_MASK;
595 + IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
596 + }
597 + } else {
598 + hw_dbg(hw, "RAR index %d is out of range.\n", rar);
599 + }
600 +
601 + return 0;
602 +}
603 +
604 +/**
605 + * ixgbe_set_vfta_82598 - Set VLAN filter table
606 + * @hw: pointer to hardware structure
607 + * @vlan: VLAN id to write to VLAN filter
608 + * @vind: VMDq output index that maps queue to VLAN id in VFTA
609 + * @vlan_on: boolean flag to turn on/off VLAN in VFTA
610 + *
611 + * Turn on/off specified VLAN in the VLAN filter table.
612 + **/
613 +s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
614 + bool vlan_on)
615 +{
616 + u32 regindex;
617 + u32 bitindex;
618 + u32 bits;
619 + u32 vftabyte;
620 +
621 + if (vlan > 4095)
622 + return IXGBE_ERR_PARAM;
623 +
624 + /* Determine 32-bit word position in array */
625 + regindex = (vlan >> 5) & 0x7F; /* upper seven bits */
626 +
627 + /* Determine the location of the (VMD) queue index */
628 + vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
629 + bitindex = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */
630 +
631 + /* Set the nibble for VMD queue index */
632 + bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
633 + bits &= (~(0x0F << bitindex));
634 + bits |= (vind << bitindex);
635 + IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);
636 +
637 + /* Determine the location of the bit for this VLAN id */
638 + bitindex = vlan & 0x1F; /* lower five bits */
639 +
640 + bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
641 + if (vlan_on)
642 + /* Turn on this VLAN id */
643 + bits |= (1 << bitindex);
644 + else
645 + /* Turn off this VLAN id */
646 + bits &= ~(1 << bitindex);
647 + IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
648 +
649 + return 0;
650 +}
651 +
652 +/**
653 + * ixgbe_clear_vfta_82598 - Clear VLAN filter table
654 + * @hw: pointer to hardware structure
655 + *
656 + * Clears the VLAN filer table, and the VMDq index associated with the filter
657 + **/
658 +static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
659 +{
660 + u32 offset;
661 + u32 vlanbyte;
662 +
663 + for (offset = 0; offset < hw->mac.vft_size; offset++)
664 + IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
665 +
666 + for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
667 + for (offset = 0; offset < hw->mac.vft_size; offset++)
668 + IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
669 + 0);
670 +
671 + return 0;
672 +}
673 +
674 +/**
675 + * ixgbe_blink_led_start_82598 - Blink LED based on index.
676 + * @hw: pointer to hardware structure
677 + * @index: led number to blink
678 + **/
679 +static s32 ixgbe_blink_led_start_82598(struct ixgbe_hw *hw, u32 index)
680 +{
681 + ixgbe_link_speed speed = 0;
682 + bool link_up = false;
683 + u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
684 + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
685 +
686 + /*
687 + * Link must be up to auto-blink the LEDs on the 82598EB MAC;
688 + * force it if link is down.
689 + */
690 + hw->mac.ops.check_link(hw, &speed, &link_up, false);
691 +
692 + if (!link_up) {
693 + autoc_reg |= IXGBE_AUTOC_FLU;
694 + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
695 + msleep(10);
696 + }
697 +
698 + led_reg &= ~IXGBE_LED_MODE_MASK(index);
699 + led_reg |= IXGBE_LED_BLINK(index);
700 + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
701 + IXGBE_WRITE_FLUSH(hw);
702 +
703 + return 0;
704 +}
705 +
706 +/**
707 + * ixgbe_blink_led_stop_82598 - Stop blinking LED based on index.
708 + * @hw: pointer to hardware structure
709 + * @index: led number to stop blinking
710 + **/
711 +static s32 ixgbe_blink_led_stop_82598(struct ixgbe_hw *hw, u32 index)
712 +{
713 + u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
714 + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
715 +
716 + autoc_reg &= ~IXGBE_AUTOC_FLU;
717 + autoc_reg |= IXGBE_AUTOC_AN_RESTART;
718 + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
719 +
720 + led_reg &= ~IXGBE_LED_MODE_MASK(index);
721 + led_reg &= ~IXGBE_LED_BLINK(index);
722 + led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
723 + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
724 + IXGBE_WRITE_FLUSH(hw);
725 +
726 + return 0;
727 +}
728 +
729 +/**
730 + * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
731 + * @hw: pointer to hardware structure
732 + * @reg: analog register to read
733 + * @val: read value
734 + *
735 + * Performs read operation to Atlas analog register specified.
736 + **/
737 +s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
738 +{
739 + u32 atlas_ctl;
740 +
741 + IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
742 + IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
743 + IXGBE_WRITE_FLUSH(hw);
744 + udelay(10);
745 + atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
746 + *val = (u8)atlas_ctl;
747 +
748 + return 0;
749 +}
750 +
751 +/**
752 + * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
753 + * @hw: pointer to hardware structure
754 + * @reg: atlas register to write
755 + * @val: value to write
756 + *
757 + * Performs write operation to Atlas analog register specified.
758 + **/
759 +s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
760 +{
761 + u32 atlas_ctl;
762 +
763 + atlas_ctl = (reg << 8) | val;
764 + IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
765 + IXGBE_WRITE_FLUSH(hw);
766 + udelay(10);
767 +
768 + return 0;
769 +}
770 +
771 +/**
772 + * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
773 + * @hw: pointer to hardware structure
774 + *
775 + * Determines physical layer capabilities of the current configuration.
776 + **/
777 +s32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
778 +{
779 + s32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
780 +
781 + switch (hw->device_id) {
782 + case IXGBE_DEV_ID_82598EB_CX4:
783 + case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
784 + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
785 + break;
786 + case IXGBE_DEV_ID_82598AF_DUAL_PORT:
787 + case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
788 + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
789 + break;
790 + case IXGBE_DEV_ID_82598EB_XF_LR:
791 + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
792 + break;
793 +
794 + default:
795 + physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
796 + break;
797 + }
798 +
799 + return physical_layer;
800 +}
801 +
802 static struct ixgbe_mac_operations mac_ops_82598 = {
803 - .reset = &ixgbe_reset_hw_82598,
804 + .init_hw = &ixgbe_init_hw_generic,
805 + .reset_hw = &ixgbe_reset_hw_82598,
806 + .start_hw = &ixgbe_start_hw_generic,
807 + .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic,
808 .get_media_type = &ixgbe_get_media_type_82598,
809 + .get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82598,
810 + .get_mac_addr = &ixgbe_get_mac_addr_generic,
811 + .stop_adapter = &ixgbe_stop_adapter_generic,
812 + .read_analog_reg8 = &ixgbe_read_analog_reg8_82598,
813 + .write_analog_reg8 = &ixgbe_write_analog_reg8_82598,
814 .setup_link = &ixgbe_setup_mac_link_82598,
815 - .check_link = &ixgbe_check_mac_link_82598,
816 .setup_link_speed = &ixgbe_setup_mac_link_speed_82598,
817 - .get_link_settings = &ixgbe_get_link_settings_82598,
818 + .check_link = &ixgbe_check_mac_link_82598,
819 + .get_link_capabilities = &ixgbe_get_link_capabilities_82598,
820 + .led_on = &ixgbe_led_on_generic,
821 + .led_off = &ixgbe_led_off_generic,
822 + .blink_led_start = &ixgbe_blink_led_start_82598,
823 + .blink_led_stop = &ixgbe_blink_led_stop_82598,
824 + .set_rar = &ixgbe_set_rar_generic,
825 + .clear_rar = &ixgbe_clear_rar_generic,
826 + .set_vmdq = &ixgbe_set_vmdq_82598,
827 + .clear_vmdq = &ixgbe_clear_vmdq_82598,
828 + .init_rx_addrs = &ixgbe_init_rx_addrs_generic,
829 + .update_uc_addr_list = &ixgbe_update_uc_addr_list_generic,
830 + .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
831 + .enable_mc = &ixgbe_enable_mc_generic,
832 + .disable_mc = &ixgbe_disable_mc_generic,
833 + .clear_vfta = &ixgbe_clear_vfta_82598,
834 + .set_vfta = &ixgbe_set_vfta_82598,
835 + .setup_fc = &ixgbe_setup_fc_82598,
836 +};
837 +
838 +static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
839 + .init_params = &ixgbe_init_eeprom_params_generic,
840 + .read = &ixgbe_read_eeprom_generic,
841 + .validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
842 + .update_checksum = &ixgbe_update_eeprom_checksum_generic,
843 +};
844 +
845 +static struct ixgbe_phy_operations phy_ops_82598 = {
846 + .identify = &ixgbe_identify_phy_generic,
847 + /* .identify_sfp = &ixgbe_identify_sfp_module_generic, */
848 + .reset = &ixgbe_reset_phy_generic,
849 + .read_reg = &ixgbe_read_phy_reg_generic,
850 + .write_reg = &ixgbe_write_phy_reg_generic,
851 + .setup_link = &ixgbe_setup_phy_link_generic,
852 + .setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
853 };
854
855 struct ixgbe_info ixgbe_82598_info = {
856 .mac = ixgbe_mac_82598EB,
857 .get_invariants = &ixgbe_get_invariants_82598,
858 .mac_ops = &mac_ops_82598,
859 + .eeprom_ops = &eeprom_ops_82598,
860 + .phy_ops = &phy_ops_82598,
861 };
862
863 --- a/drivers/net/ixgbe/ixgbe_common.c
864 +++ b/drivers/net/ixgbe/ixgbe_common.c
865 @@ -1,7 +1,7 @@
866 /*******************************************************************************
867
868 Intel 10 Gigabit PCI Express Linux driver
869 - Copyright(c) 1999 - 2007 Intel Corporation.
870 + Copyright(c) 1999 - 2008 Intel Corporation.
871
872 This program is free software; you can redistribute it and/or modify it
873 under the terms and conditions of the GNU General Public License,
874 @@ -20,7 +20,6 @@
875 the file called "COPYING".
876
877 Contact Information:
878 - Linux NICS <linux.nics@intel.com>
879 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
880 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
881
882 @@ -33,20 +32,28 @@
883 #include "ixgbe_common.h"
884 #include "ixgbe_phy.h"
885
886 -static s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw);
887 -
888 static s32 ixgbe_poll_eeprom_eerd_done(struct ixgbe_hw *hw);
889 +static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
890 static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
891 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
892 +static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
893 +static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
894 +static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
895 + u16 count);
896 +static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
897 +static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
898 +static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
899 +static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
900 static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw);
901
902 -static s32 ixgbe_clear_vfta(struct ixgbe_hw *hw);
903 -static s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw);
904 +static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index);
905 +static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index);
906 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
907 static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr);
908 +static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
909
910 /**
911 - * ixgbe_start_hw - Prepare hardware for TX/RX
912 + * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
913 * @hw: pointer to hardware structure
914 *
915 * Starts the hardware by filling the bus info structure and media type, clears
916 @@ -54,7 +61,7 @@ static void ixgbe_add_mc_addr(struct ixg
917 * table, VLAN filter table, calls routine to set up link and flow control
918 * settings, and leaves transmit and receive units disabled and uninitialized
919 **/
920 -s32 ixgbe_start_hw(struct ixgbe_hw *hw)
921 +s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
922 {
923 u32 ctrl_ext;
924
925 @@ -62,22 +69,22 @@ s32 ixgbe_start_hw(struct ixgbe_hw *hw)
926 hw->phy.media_type = hw->mac.ops.get_media_type(hw);
927
928 /* Identify the PHY */
929 - ixgbe_identify_phy(hw);
930 + hw->phy.ops.identify(hw);
931
932 /*
933 * Store MAC address from RAR0, clear receive address registers, and
934 * clear the multicast table
935 */
936 - ixgbe_init_rx_addrs(hw);
937 + hw->mac.ops.init_rx_addrs(hw);
938
939 /* Clear the VLAN filter table */
940 - ixgbe_clear_vfta(hw);
941 + hw->mac.ops.clear_vfta(hw);
942
943 /* Set up link */
944 hw->mac.ops.setup_link(hw);
945
946 /* Clear statistics registers */
947 - ixgbe_clear_hw_cntrs(hw);
948 + hw->mac.ops.clear_hw_cntrs(hw);
949
950 /* Set No Snoop Disable */
951 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
952 @@ -92,34 +99,34 @@ s32 ixgbe_start_hw(struct ixgbe_hw *hw)
953 }
954
955 /**
956 - * ixgbe_init_hw - Generic hardware initialization
957 + * ixgbe_init_hw_generic - Generic hardware initialization
958 * @hw: pointer to hardware structure
959 *
960 - * Initialize the hardware by reseting the hardware, filling the bus info
961 + * Initialize the hardware by resetting the hardware, filling the bus info
962 * structure and media type, clears all on chip counters, initializes receive
963 * address registers, multicast table, VLAN filter table, calls routine to set
964 * up link and flow control settings, and leaves transmit and receive units
965 * disabled and uninitialized
966 **/
967 -s32 ixgbe_init_hw(struct ixgbe_hw *hw)
968 +s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
969 {
970 /* Reset the hardware */
971 - hw->mac.ops.reset(hw);
972 + hw->mac.ops.reset_hw(hw);
973
974 /* Start the HW */
975 - ixgbe_start_hw(hw);
976 + hw->mac.ops.start_hw(hw);
977
978 return 0;
979 }
980
981 /**
982 - * ixgbe_clear_hw_cntrs - Generic clear hardware counters
983 + * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
984 * @hw: pointer to hardware structure
985 *
986 * Clears all hardware statistics counters by reading them from the hardware
987 * Statistics counters are clear on read.
988 **/
989 -static s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw)
990 +s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
991 {
992 u16 i = 0;
993
994 @@ -191,7 +198,36 @@ static s32 ixgbe_clear_hw_cntrs(struct i
995 }
996
997 /**
998 - * ixgbe_get_mac_addr - Generic get MAC address
999 + * ixgbe_read_pba_num_generic - Reads part number from EEPROM
1000 + * @hw: pointer to hardware structure
1001 + * @pba_num: stores the part number from the EEPROM
1002 + *
1003 + * Reads the part number from the EEPROM.
1004 + **/
1005 +s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
1006 +{
1007 + s32 ret_val;
1008 + u16 data;
1009 +
1010 + ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
1011 + if (ret_val) {
1012 + hw_dbg(hw, "NVM Read Error\n");
1013 + return ret_val;
1014 + }
1015 + *pba_num = (u32)(data << 16);
1016 +
1017 + ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
1018 + if (ret_val) {
1019 + hw_dbg(hw, "NVM Read Error\n");
1020 + return ret_val;
1021 + }
1022 + *pba_num |= data;
1023 +
1024 + return 0;
1025 +}
1026 +
1027 +/**
1028 + * ixgbe_get_mac_addr_generic - Generic get MAC address
1029 * @hw: pointer to hardware structure
1030 * @mac_addr: Adapter MAC address
1031 *
1032 @@ -199,7 +235,7 @@ static s32 ixgbe_clear_hw_cntrs(struct i
1033 * A reset of the adapter must be performed prior to calling this function
1034 * in order for the MAC address to have been loaded from the EEPROM into RAR0
1035 **/
1036 -s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr)
1037 +s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
1038 {
1039 u32 rar_high;
1040 u32 rar_low;
1041 @@ -217,30 +253,8 @@ s32 ixgbe_get_mac_addr(struct ixgbe_hw *
1042 return 0;
1043 }
1044
1045 -s32 ixgbe_read_part_num(struct ixgbe_hw *hw, u32 *part_num)
1046 -{
1047 - s32 ret_val;
1048 - u16 data;
1049 -
1050 - ret_val = ixgbe_read_eeprom(hw, IXGBE_PBANUM0_PTR, &data);
1051 - if (ret_val) {
1052 - hw_dbg(hw, "NVM Read Error\n");
1053 - return ret_val;
1054 - }
1055 - *part_num = (u32)(data << 16);
1056 -
1057 - ret_val = ixgbe_read_eeprom(hw, IXGBE_PBANUM1_PTR, &data);
1058 - if (ret_val) {
1059 - hw_dbg(hw, "NVM Read Error\n");
1060 - return ret_val;
1061 - }
1062 - *part_num |= data;
1063 -
1064 - return 0;
1065 -}
1066 -
1067 /**
1068 - * ixgbe_stop_adapter - Generic stop TX/RX units
1069 + * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
1070 * @hw: pointer to hardware structure
1071 *
1072 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
1073 @@ -248,7 +262,7 @@ s32 ixgbe_read_part_num(struct ixgbe_hw
1074 * the shared code and drivers to determine if the adapter is in a stopped
1075 * state and should not touch the hardware.
1076 **/
1077 -s32 ixgbe_stop_adapter(struct ixgbe_hw *hw)
1078 +s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
1079 {
1080 u32 number_of_queues;
1081 u32 reg_val;
1082 @@ -264,6 +278,7 @@ s32 ixgbe_stop_adapter(struct ixgbe_hw *
1083 reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1084 reg_val &= ~(IXGBE_RXCTRL_RXEN);
1085 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val);
1086 + IXGBE_WRITE_FLUSH(hw);
1087 msleep(2);
1088
1089 /* Clear interrupt mask to stop from interrupts being generated */
1090 @@ -273,7 +288,7 @@ s32 ixgbe_stop_adapter(struct ixgbe_hw *
1091 IXGBE_READ_REG(hw, IXGBE_EICR);
1092
1093 /* Disable the transmit unit. Each queue must be disabled. */
1094 - number_of_queues = hw->mac.num_tx_queues;
1095 + number_of_queues = hw->mac.max_tx_queues;
1096 for (i = 0; i < number_of_queues; i++) {
1097 reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
1098 if (reg_val & IXGBE_TXDCTL_ENABLE) {
1099 @@ -282,15 +297,22 @@ s32 ixgbe_stop_adapter(struct ixgbe_hw *
1100 }
1101 }
1102
1103 + /*
1104 + * Prevent the PCI-E bus from hanging by disabling PCI-E master
1105 + * access and verify no pending requests
1106 + */
1107 + if (ixgbe_disable_pcie_master(hw) != 0)
1108 + hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
1109 +
1110 return 0;
1111 }
1112
1113 /**
1114 - * ixgbe_led_on - Turns on the software controllable LEDs.
1115 + * ixgbe_led_on_generic - Turns on the software controllable LEDs.
1116 * @hw: pointer to hardware structure
1117 * @index: led number to turn on
1118 **/
1119 -s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index)
1120 +s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
1121 {
1122 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1123
1124 @@ -304,11 +326,11 @@ s32 ixgbe_led_on(struct ixgbe_hw *hw, u3
1125 }
1126
1127 /**
1128 - * ixgbe_led_off - Turns off the software controllable LEDs.
1129 + * ixgbe_led_off_generic - Turns off the software controllable LEDs.
1130 * @hw: pointer to hardware structure
1131 * @index: led number to turn off
1132 **/
1133 -s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index)
1134 +s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
1135 {
1136 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1137
1138 @@ -321,15 +343,14 @@ s32 ixgbe_led_off(struct ixgbe_hw *hw, u
1139 return 0;
1140 }
1141
1142 -
1143 /**
1144 - * ixgbe_init_eeprom - Initialize EEPROM params
1145 + * ixgbe_init_eeprom_params_generic - Initialize EEPROM params
1146 * @hw: pointer to hardware structure
1147 *
1148 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
1149 * ixgbe_hw struct in order to set up EEPROM access.
1150 **/
1151 -s32 ixgbe_init_eeprom(struct ixgbe_hw *hw)
1152 +s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
1153 {
1154 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
1155 u32 eec;
1156 @@ -337,6 +358,9 @@ s32 ixgbe_init_eeprom(struct ixgbe_hw *h
1157
1158 if (eeprom->type == ixgbe_eeprom_uninitialized) {
1159 eeprom->type = ixgbe_eeprom_none;
1160 + /* Set default semaphore delay to 10ms which is a well
1161 + * tested value */
1162 + eeprom->semaphore_delay = 10;
1163
1164 /*
1165 * Check for EEPROM present first.
1166 @@ -369,18 +393,85 @@ s32 ixgbe_init_eeprom(struct ixgbe_hw *h
1167 }
1168
1169 /**
1170 - * ixgbe_read_eeprom - Read EEPROM word using EERD
1171 + * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
1172 + * @hw: pointer to hardware structure
1173 + * @offset: offset within the EEPROM to be read
1174 + * @data: read 16 bit value from EEPROM
1175 + *
1176 + * Reads 16 bit value from EEPROM through bit-bang method
1177 + **/
1178 +s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1179 + u16 *data)
1180 +{
1181 + s32 status;
1182 + u16 word_in;
1183 + u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
1184 +
1185 + hw->eeprom.ops.init_params(hw);
1186 +
1187 + if (offset >= hw->eeprom.word_size) {
1188 + status = IXGBE_ERR_EEPROM;
1189 + goto out;
1190 + }
1191 +
1192 + /* Prepare the EEPROM for reading */
1193 + status = ixgbe_acquire_eeprom(hw);
1194 +
1195 + if (status == 0) {
1196 + if (ixgbe_ready_eeprom(hw) != 0) {
1197 + ixgbe_release_eeprom(hw);
1198 + status = IXGBE_ERR_EEPROM;
1199 + }
1200 + }
1201 +
1202 + if (status == 0) {
1203 + ixgbe_standby_eeprom(hw);
1204 +
1205 + /*
1206 + * Some SPI eeproms use the 8th address bit embedded in the
1207 + * opcode
1208 + */
1209 + if ((hw->eeprom.address_bits == 8) && (offset >= 128))
1210 + read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
1211 +
1212 + /* Send the READ command (opcode + addr) */
1213 + ixgbe_shift_out_eeprom_bits(hw, read_opcode,
1214 + IXGBE_EEPROM_OPCODE_BITS);
1215 + ixgbe_shift_out_eeprom_bits(hw, (u16)(offset*2),
1216 + hw->eeprom.address_bits);
1217 +
1218 + /* Read the data. */
1219 + word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
1220 + *data = (word_in >> 8) | (word_in << 8);
1221 +
1222 + /* End this read operation */
1223 + ixgbe_release_eeprom(hw);
1224 + }
1225 +
1226 +out:
1227 + return status;
1228 +}
1229 +
1230 +/**
1231 + * ixgbe_read_eeprom_generic - Read EEPROM word using EERD
1232 * @hw: pointer to hardware structure
1233 * @offset: offset of word in the EEPROM to read
1234 * @data: word read from the EEPROM
1235 *
1236 * Reads a 16 bit word from the EEPROM using the EERD register.
1237 **/
1238 -s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data)
1239 +s32 ixgbe_read_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
1240 {
1241 u32 eerd;
1242 s32 status;
1243
1244 + hw->eeprom.ops.init_params(hw);
1245 +
1246 + if (offset >= hw->eeprom.word_size) {
1247 + status = IXGBE_ERR_EEPROM;
1248 + goto out;
1249 + }
1250 +
1251 eerd = (offset << IXGBE_EEPROM_READ_ADDR_SHIFT) +
1252 IXGBE_EEPROM_READ_REG_START;
1253
1254 @@ -389,10 +480,11 @@ s32 ixgbe_read_eeprom(struct ixgbe_hw *h
1255
1256 if (status == 0)
1257 *data = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
1258 - IXGBE_EEPROM_READ_REG_DATA);
1259 + IXGBE_EEPROM_READ_REG_DATA);
1260 else
1261 hw_dbg(hw, "Eeprom read timed out\n");
1262
1263 +out:
1264 return status;
1265 }
1266
1267 @@ -420,6 +512,58 @@ static s32 ixgbe_poll_eeprom_eerd_done(s
1268 }
1269
1270 /**
1271 + * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
1272 + * @hw: pointer to hardware structure
1273 + *
1274 + * Prepares EEPROM for access using bit-bang method. This function should
1275 + * be called before issuing a command to the EEPROM.
1276 + **/
1277 +static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
1278 +{
1279 + s32 status = 0;
1280 + u32 eec;
1281 + u32 i;
1282 +
1283 + if (ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
1284 + status = IXGBE_ERR_SWFW_SYNC;
1285 +
1286 + if (status == 0) {
1287 + eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1288 +
1289 + /* Request EEPROM Access */
1290 + eec |= IXGBE_EEC_REQ;
1291 + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1292 +
1293 + for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
1294 + eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1295 + if (eec & IXGBE_EEC_GNT)
1296 + break;
1297 + udelay(5);
1298 + }
1299 +
1300 + /* Release if grant not acquired */
1301 + if (!(eec & IXGBE_EEC_GNT)) {
1302 + eec &= ~IXGBE_EEC_REQ;
1303 + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1304 + hw_dbg(hw, "Could not acquire EEPROM grant\n");
1305 +
1306 + ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1307 + status = IXGBE_ERR_EEPROM;
1308 + }
1309 + }
1310 +
1311 + /* Setup EEPROM for Read/Write */
1312 + if (status == 0) {
1313 + /* Clear CS and SK */
1314 + eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
1315 + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1316 + IXGBE_WRITE_FLUSH(hw);
1317 + udelay(1);
1318 + }
1319 + return status;
1320 +}
1321 +
1322 +/**
1323 * ixgbe_get_eeprom_semaphore - Get hardware semaphore
1324 * @hw: pointer to hardware structure
1325 *
1326 @@ -475,7 +619,7 @@ static s32 ixgbe_get_eeprom_semaphore(st
1327 */
1328 if (i >= timeout) {
1329 hw_dbg(hw, "Driver can't access the Eeprom - Semaphore "
1330 - "not granted.\n");
1331 + "not granted.\n");
1332 ixgbe_release_eeprom_semaphore(hw);
1333 status = IXGBE_ERR_EEPROM;
1334 }
1335 @@ -503,6 +647,217 @@ static void ixgbe_release_eeprom_semapho
1336 }
1337
1338 /**
1339 + * ixgbe_ready_eeprom - Polls for EEPROM ready
1340 + * @hw: pointer to hardware structure
1341 + **/
1342 +static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
1343 +{
1344 + s32 status = 0;
1345 + u16 i;
1346 + u8 spi_stat_reg;
1347 +
1348 + /*
1349 + * Read "Status Register" repeatedly until the LSB is cleared. The
1350 + * EEPROM will signal that the command has been completed by clearing
1351 + * bit 0 of the internal status register. If it's not cleared within
1352 + * 5 milliseconds, then error out.
1353 + */
1354 + for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
1355 + ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
1356 + IXGBE_EEPROM_OPCODE_BITS);
1357 + spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
1358 + if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
1359 + break;
1360 +
1361 + udelay(5);
1362 + ixgbe_standby_eeprom(hw);
1363 + };
1364 +
1365 + /*
1366 + * On some parts, SPI write time could vary from 0-20mSec on 3.3V
1367 + * devices (and only 0-5mSec on 5V devices)
1368 + */
1369 + if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
1370 + hw_dbg(hw, "SPI EEPROM Status error\n");
1371 + status = IXGBE_ERR_EEPROM;
1372 + }
1373 +
1374 + return status;
1375 +}
1376 +
1377 +/**
1378 + * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
1379 + * @hw: pointer to hardware structure
1380 + **/
1381 +static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
1382 +{
1383 + u32 eec;
1384 +
1385 + eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1386 +
1387 + /* Toggle CS to flush commands */
1388 + eec |= IXGBE_EEC_CS;
1389 + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1390 + IXGBE_WRITE_FLUSH(hw);
1391 + udelay(1);
1392 + eec &= ~IXGBE_EEC_CS;
1393 + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1394 + IXGBE_WRITE_FLUSH(hw);
1395 + udelay(1);
1396 +}
1397 +
1398 +/**
1399 + * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
1400 + * @hw: pointer to hardware structure
1401 + * @data: data to send to the EEPROM
1402 + * @count: number of bits to shift out
1403 + **/
1404 +static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
1405 + u16 count)
1406 +{
1407 + u32 eec;
1408 + u32 mask;
1409 + u32 i;
1410 +
1411 + eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1412 +
1413 + /*
1414 + * Mask is used to shift "count" bits of "data" out to the EEPROM
1415 + * one bit at a time. Determine the starting bit based on count
1416 + */
1417 + mask = 0x01 << (count - 1);
1418 +
1419 + for (i = 0; i < count; i++) {
1420 + /*
1421 + * A "1" is shifted out to the EEPROM by setting bit "DI" to a
1422 + * "1", and then raising and then lowering the clock (the SK
1423 + * bit controls the clock input to the EEPROM). A "0" is
1424 + * shifted out to the EEPROM by setting "DI" to "0" and then
1425 + * raising and then lowering the clock.
1426 + */
1427 + if (data & mask)
1428 + eec |= IXGBE_EEC_DI;
1429 + else
1430 + eec &= ~IXGBE_EEC_DI;
1431 +
1432 + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1433 + IXGBE_WRITE_FLUSH(hw);
1434 +
1435 + udelay(1);
1436 +
1437 + ixgbe_raise_eeprom_clk(hw, &eec);
1438 + ixgbe_lower_eeprom_clk(hw, &eec);
1439 +
1440 + /*
1441 + * Shift mask to signify next bit of data to shift in to the
1442 + * EEPROM
1443 + */
1444 + mask = mask >> 1;
1445 + };
1446 +
1447 + /* We leave the "DI" bit set to "0" when we leave this routine. */
1448 + eec &= ~IXGBE_EEC_DI;
1449 + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1450 + IXGBE_WRITE_FLUSH(hw);
1451 +}
1452 +
1453 +/**
1454 + * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
1455 + * @hw: pointer to hardware structure
1456 + **/
1457 +static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
1458 +{
1459 + u32 eec;
1460 + u32 i;
1461 + u16 data = 0;
1462 +
1463 + /*
1464 + * In order to read a register from the EEPROM, we need to shift
1465 + * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
1466 + * the clock input to the EEPROM (setting the SK bit), and then reading
1467 + * the value of the "DO" bit. During this "shifting in" process the
1468 + * "DI" bit should always be clear.
1469 + */
1470 + eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1471 +
1472 + eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
1473 +
1474 + for (i = 0; i < count; i++) {
1475 + data = data << 1;
1476 + ixgbe_raise_eeprom_clk(hw, &eec);
1477 +
1478 + eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1479 +
1480 + eec &= ~(IXGBE_EEC_DI);
1481 + if (eec & IXGBE_EEC_DO)
1482 + data |= 1;
1483 +
1484 + ixgbe_lower_eeprom_clk(hw, &eec);
1485 + }
1486 +
1487 + return data;
1488 +}
1489 +
1490 +/**
1491 + * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
1492 + * @hw: pointer to hardware structure
1493 + * @eec: EEC register's current value
1494 + **/
1495 +static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
1496 +{
1497 + /*
1498 + * Raise the clock input to the EEPROM
1499 + * (setting the SK bit), then delay
1500 + */
1501 + *eec = *eec | IXGBE_EEC_SK;
1502 + IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
1503 + IXGBE_WRITE_FLUSH(hw);
1504 + udelay(1);
1505 +}
1506 +
1507 +/**
1508 + * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
1509 + * @hw: pointer to hardware structure
1510 + * @eec: EEC register's current value
1511 + **/
1512 +static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
1513 +{
1514 + /*
1515 + * Lower the clock input to the EEPROM (clearing the SK bit), then
1516 + * delay
1517 + */
1518 + *eec = *eec & ~IXGBE_EEC_SK;
1519 + IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
1520 + IXGBE_WRITE_FLUSH(hw);
1521 + udelay(1);
1522 +}
1523 +
1524 +/**
1525 + * ixgbe_release_eeprom - Release EEPROM, release semaphores
1526 + * @hw: pointer to hardware structure
1527 + **/
1528 +static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
1529 +{
1530 + u32 eec;
1531 +
1532 + eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1533 +
1534 + eec |= IXGBE_EEC_CS; /* Pull CS high */
1535 + eec &= ~IXGBE_EEC_SK; /* Lower SCK */
1536 +
1537 + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1538 + IXGBE_WRITE_FLUSH(hw);
1539 +
1540 + udelay(1);
1541 +
1542 + /* Stop requesting EEPROM access */
1543 + eec &= ~IXGBE_EEC_REQ;
1544 + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1545 +
1546 + ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1547 +}
1548 +
1549 +/**
1550 * ixgbe_calc_eeprom_checksum - Calculates and returns the checksum
1551 * @hw: pointer to hardware structure
1552 **/
1553 @@ -517,7 +872,7 @@ static u16 ixgbe_calc_eeprom_checksum(st
1554
1555 /* Include 0x0-0x3F in the checksum */
1556 for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
1557 - if (ixgbe_read_eeprom(hw, i, &word) != 0) {
1558 + if (hw->eeprom.ops.read(hw, i, &word) != 0) {
1559 hw_dbg(hw, "EEPROM read failed\n");
1560 break;
1561 }
1562 @@ -526,15 +881,15 @@ static u16 ixgbe_calc_eeprom_checksum(st
1563
1564 /* Include all data from pointers except for the fw pointer */
1565 for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
1566 - ixgbe_read_eeprom(hw, i, &pointer);
1567 + hw->eeprom.ops.read(hw, i, &pointer);
1568
1569 /* Make sure the pointer seems valid */
1570 if (pointer != 0xFFFF && pointer != 0) {
1571 - ixgbe_read_eeprom(hw, pointer, &length);
1572 + hw->eeprom.ops.read(hw, pointer, &length);
1573
1574 if (length != 0xFFFF && length != 0) {
1575 for (j = pointer+1; j <= pointer+length; j++) {
1576 - ixgbe_read_eeprom(hw, j, &word);
1577 + hw->eeprom.ops.read(hw, j, &word);
1578 checksum += word;
1579 }
1580 }
1581 @@ -547,14 +902,15 @@ static u16 ixgbe_calc_eeprom_checksum(st
1582 }
1583
1584 /**
1585 - * ixgbe_validate_eeprom_checksum - Validate EEPROM checksum
1586 + * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
1587 * @hw: pointer to hardware structure
1588 * @checksum_val: calculated checksum
1589 *
1590 * Performs checksum calculation and validates the EEPROM checksum. If the
1591 * caller does not need checksum_val, the value can be NULL.
1592 **/
1593 -s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val)
1594 +s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
1595 + u16 *checksum_val)
1596 {
1597 s32 status;
1598 u16 checksum;
1599 @@ -565,12 +921,12 @@ s32 ixgbe_validate_eeprom_checksum(struc
1600 * not continue or we could be in for a very long wait while every
1601 * EEPROM read fails
1602 */
1603 - status = ixgbe_read_eeprom(hw, 0, &checksum);
1604 + status = hw->eeprom.ops.read(hw, 0, &checksum);
1605
1606 if (status == 0) {
1607 checksum = ixgbe_calc_eeprom_checksum(hw);
1608
1609 - ixgbe_read_eeprom(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
1610 + hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
1611
1612 /*
1613 * Verify read checksum from EEPROM is the same as
1614 @@ -590,6 +946,33 @@ s32 ixgbe_validate_eeprom_checksum(struc
1615 }
1616
1617 /**
1618 + * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
1619 + * @hw: pointer to hardware structure
1620 + **/
1621 +s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
1622 +{
1623 + s32 status;
1624 + u16 checksum;
1625 +
1626 + /*
1627 + * Read the first word from the EEPROM. If this times out or fails, do
1628 + * not continue or we could be in for a very long wait while every
1629 + * EEPROM read fails
1630 + */
1631 + status = hw->eeprom.ops.read(hw, 0, &checksum);
1632 +
1633 + if (status == 0) {
1634 + checksum = ixgbe_calc_eeprom_checksum(hw);
1635 + status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
1636 + checksum);
1637 + } else {
1638 + hw_dbg(hw, "EEPROM read failed\n");
1639 + }
1640 +
1641 + return status;
1642 +}
1643 +
1644 +/**
1645 * ixgbe_validate_mac_addr - Validate MAC address
1646 * @mac_addr: pointer to MAC address.
1647 *
1648 @@ -607,61 +990,140 @@ s32 ixgbe_validate_mac_addr(u8 *mac_addr
1649 status = IXGBE_ERR_INVALID_MAC_ADDR;
1650 /* Reject the zero address */
1651 else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
1652 - mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0)
1653 + mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0)
1654 status = IXGBE_ERR_INVALID_MAC_ADDR;
1655
1656 return status;
1657 }
1658
1659 /**
1660 - * ixgbe_set_rar - Set RX address register
1661 + * ixgbe_set_rar_generic - Set Rx address register
1662 * @hw: pointer to hardware structure
1663 - * @addr: Address to put into receive address register
1664 * @index: Receive address register to write
1665 - * @vind: Vind to set RAR to
1666 + * @addr: Address to put into receive address register
1667 + * @vmdq: VMDq "set" or "pool" index
1668 * @enable_addr: set flag that address is active
1669 *
1670 * Puts an ethernet address into a receive address register.
1671 **/
1672 -s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vind,
1673 - u32 enable_addr)
1674 +s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
1675 + u32 enable_addr)
1676 {
1677 u32 rar_low, rar_high;
1678 + u32 rar_entries = hw->mac.num_rar_entries;
1679
1680 - /*
1681 - * HW expects these in little endian so we reverse the byte order from
1682 - * network order (big endian) to little endian
1683 - */
1684 - rar_low = ((u32)addr[0] |
1685 - ((u32)addr[1] << 8) |
1686 - ((u32)addr[2] << 16) |
1687 - ((u32)addr[3] << 24));
1688 -
1689 - rar_high = ((u32)addr[4] |
1690 - ((u32)addr[5] << 8) |
1691 - ((vind << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK));
1692 + /* setup VMDq pool selection before this RAR gets enabled */
1693 + hw->mac.ops.set_vmdq(hw, index, vmdq);
1694 +
1695 + /* Make sure we are using a valid rar index range */
1696 + if (index < rar_entries) {
1697 + /*
1698 + * HW expects these in little endian so we reverse the byte
1699 + * order from network order (big endian) to little endian
1700 + */
1701 + rar_low = ((u32)addr[0] |
1702 + ((u32)addr[1] << 8) |
1703 + ((u32)addr[2] << 16) |
1704 + ((u32)addr[3] << 24));
1705 + /*
1706 + * Some parts put the VMDq setting in the extra RAH bits,
1707 + * so save everything except the lower 16 bits that hold part
1708 + * of the address and the address valid bit.
1709 + */
1710 + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1711 + rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1712 + rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
1713
1714 - if (enable_addr != 0)
1715 - rar_high |= IXGBE_RAH_AV;
1716 + if (enable_addr != 0)
1717 + rar_high |= IXGBE_RAH_AV;
1718
1719 - IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
1720 - IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1721 + IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
1722 + IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1723 + } else {
1724 + hw_dbg(hw, "RAR index %d is out of range.\n", index);
1725 + }
1726
1727 return 0;
1728 }
1729
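/*
 * A minimal model of the packing done above: the six address bytes are
 * stored little-endian, four in RAL and two in the low 16 bits of RAH;
 * the upper RAH bits carry VMDq state and the address-valid (AV) flag,
 * which is why the function preserves them with a read-modify-write.
 */
#include <stdint.h>

static void pack_rar(const uint8_t addr[6], uint32_t *ral, uint32_t *rah_addr_bits)
{
	*ral = (uint32_t)addr[0] |
	       ((uint32_t)addr[1] << 8) |
	       ((uint32_t)addr[2] << 16) |
	       ((uint32_t)addr[3] << 24);

	/* only bits 15:0 of RAH hold address bytes */
	*rah_addr_bits = (uint32_t)addr[4] | ((uint32_t)addr[5] << 8);
}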
1730 /**
1731 - * ixgbe_init_rx_addrs - Initializes receive address filters.
1732 + * ixgbe_clear_rar_generic - Remove Rx address register
1733 + * @hw: pointer to hardware structure
1734 + * @index: Receive address register to write
1735 + *
1736 + * Clears an ethernet address from a receive address register.
1737 + **/
1738 +s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
1739 +{
1740 + u32 rar_high;
1741 + u32 rar_entries = hw->mac.num_rar_entries;
1742 +
1743 + /* Make sure we are using a valid rar index range */
1744 + if (index < rar_entries) {
1745 + /*
1746 + * Some parts put the VMDq setting in the extra RAH bits,
1747 + * so save everything except the lower 16 bits that hold part
1748 + * of the address and the address valid bit.
1749 + */
1750 + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1751 + rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1752 +
1753 + IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
1754 + IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1755 + } else {
1756 + hw_dbg(hw, "RAR index %d is out of range.\n", index);
1757 + }
1758 +
1759 + /* clear VMDq pool/queue selection for this RAR */
1760 + hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
1761 +
1762 + return 0;
1763 +}
1764 +
1765 +/**
1766 + * ixgbe_enable_rar - Enable Rx address register
1767 + * @hw: pointer to hardware structure
1768 + * @index: index into the RAR table
1769 + *
1770 + * Enables the selected receive address register.
1771 + **/
1772 +static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index)
1773 +{
1774 + u32 rar_high;
1775 +
1776 + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1777 + rar_high |= IXGBE_RAH_AV;
1778 + IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1779 +}
1780 +
1781 +/**
1782 + * ixgbe_disable_rar - Disable Rx address register
1783 + * @hw: pointer to hardware structure
1784 + * @index: index into the RAR table
1785 + *
1786 + * Disables the selected receive address register.
1787 + **/
1788 +static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index)
1789 +{
1790 + u32 rar_high;
1791 +
1792 + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1793 + rar_high &= (~IXGBE_RAH_AV);
1794 + IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1795 +}
1796 +
1797 +/**
1798 + * ixgbe_init_rx_addrs_generic - Initializes receive address filters.
1799 * @hw: pointer to hardware structure
1800 *
1801 * Places the MAC address in receive address register 0 and clears the rest
1802 - * of the receive addresss registers. Clears the multicast table. Assumes
1803 + * of the receive address registers. Clears the multicast table. Assumes
1804 * the receiver is in reset when the routine is called.
1805 **/
1806 -static s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw)
1807 +s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
1808 {
1809 u32 i;
1810 - u32 rar_entries = hw->mac.num_rx_addrs;
1811 + u32 rar_entries = hw->mac.num_rar_entries;
1812
1813 /*
1814 * If the current mac address is valid, assume it is a software override
1815 @@ -671,29 +1133,30 @@ static s32 ixgbe_init_rx_addrs(struct ix
1816 if (ixgbe_validate_mac_addr(hw->mac.addr) ==
1817 IXGBE_ERR_INVALID_MAC_ADDR) {
1818 /* Get the MAC address from the RAR0 for later reference */
1819 - ixgbe_get_mac_addr(hw, hw->mac.addr);
1820 + hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
1821
1822 hw_dbg(hw, " Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
1823 - hw->mac.addr[0], hw->mac.addr[1],
1824 - hw->mac.addr[2]);
1825 + hw->mac.addr[0], hw->mac.addr[1],
1826 + hw->mac.addr[2]);
1827 hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3],
1828 - hw->mac.addr[4], hw->mac.addr[5]);
1829 + hw->mac.addr[4], hw->mac.addr[5]);
1830 } else {
1831 /* Setup the receive address. */
1832 hw_dbg(hw, "Overriding MAC Address in RAR[0]\n");
1833 hw_dbg(hw, " New MAC Addr =%.2X %.2X %.2X ",
1834 - hw->mac.addr[0], hw->mac.addr[1],
1835 - hw->mac.addr[2]);
1836 + hw->mac.addr[0], hw->mac.addr[1],
1837 + hw->mac.addr[2]);
1838 hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3],
1839 - hw->mac.addr[4], hw->mac.addr[5]);
1840 + hw->mac.addr[4], hw->mac.addr[5]);
1841
1842 - ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
1843 + hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
1844 }
1845 + hw->addr_ctrl.overflow_promisc = 0;
1846
1847 hw->addr_ctrl.rar_used_count = 1;
1848
1849 /* Zero out the other receive addresses. */
1850 - hw_dbg(hw, "Clearing RAR[1-15]\n");
1851 + hw_dbg(hw, "Clearing RAR[1-%d]\n", rar_entries - 1);
1852 for (i = 1; i < rar_entries; i++) {
1853 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
1854 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
1855 @@ -705,9 +1168,113 @@ static s32 ixgbe_init_rx_addrs(struct ix
1856 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
1857
1858 hw_dbg(hw, " Clearing MTA\n");
1859 - for (i = 0; i < IXGBE_MC_TBL_SIZE; i++)
1860 + for (i = 0; i < hw->mac.mcft_size; i++)
1861 IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
1862
1863 + if (hw->mac.ops.init_uta_tables)
1864 + hw->mac.ops.init_uta_tables(hw);
1865 +
1866 + return 0;
1867 +}
1868 +
1869 +/**
1870 + * ixgbe_add_uc_addr - Adds a secondary unicast address.
1871 + * @hw: pointer to hardware structure
1872 + * @addr: new address
1873 + *
1874 + * Adds it to an unused receive address register or goes into promiscuous mode.
1875 + **/
1876 +static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
1877 +{
1878 + u32 rar_entries = hw->mac.num_rar_entries;
1879 + u32 rar;
1880 +
1881 + hw_dbg(hw, " UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
1882 + addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
1883 +
1884 + /*
1885 + * Place this address in the RAR if there is room,
1886 + * else put the controller into promiscuous mode
1887 + */
1888 + if (hw->addr_ctrl.rar_used_count < rar_entries) {
1889 + rar = hw->addr_ctrl.rar_used_count -
1890 + hw->addr_ctrl.mc_addr_in_rar_count;
1891 + hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
1892 + hw_dbg(hw, "Added a secondary address to RAR[%d]\n", rar);
1893 + hw->addr_ctrl.rar_used_count++;
1894 + } else {
1895 + hw->addr_ctrl.overflow_promisc++;
1896 + }
1897 +
1898 + hw_dbg(hw, "ixgbe_add_uc_addr Complete\n");
1899 +}
1900 +
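/*
 * The RAR allocation policy implied by ixgbe_add_uc_addr() above and
 * ixgbe_add_mc_addr() further below: RAR[0] holds the primary MAC,
 * secondary unicast entries fill upward from RAR[1], and multicast
 * entries fill downward from the last entry. A sketch of the index
 * arithmetic:
 */
#include <stdint.h>

static uint32_t next_uc_rar(uint32_t rar_used_count, uint32_t mc_in_rar)
{
	/* unicast entries sit below any multicast entries already added */
	return rar_used_count - mc_in_rar;
}

static uint32_t next_mc_rar(uint32_t rar_entries, uint32_t mc_in_rar)
{
	/* multicast entries grow down from the top of the table */
	return rar_entries - mc_in_rar - 1;
}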
1901 +/**
1902 + * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
1903 + * @hw: pointer to hardware structure
1904 + * @addr_list: the list of new addresses
1905 + * @addr_count: number of addresses
1906 + * @next: iterator function to walk the address list
1907 + *
1908 + * The given list replaces any existing list. Clears the secondary addrs from
1909 + * receive address registers. Uses unused receive address registers for the
1910 + * first secondary addresses, and falls back to promiscuous mode as needed.
1911 + *
1912 + * Drivers using secondary unicast addresses must set user_set_promisc when
1913 + * manually putting the device into promiscuous mode.
1914 + **/
1915 +s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
1916 + u32 addr_count, ixgbe_mc_addr_itr next)
1917 +{
1918 + u8 *addr;
1919 + u32 i;
1920 + u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
1921 + u32 uc_addr_in_use;
1922 + u32 fctrl;
1923 + u32 vmdq;
1924 +
1925 + /*
1926 + * Clear accounting of old secondary address list,
1927 + * don't count RAR[0]
1928 + */
1929 + uc_addr_in_use = hw->addr_ctrl.rar_used_count -
1930 + hw->addr_ctrl.mc_addr_in_rar_count - 1;
1931 + hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
1932 + hw->addr_ctrl.overflow_promisc = 0;
1933 +
1934 + /* Zero out the other receive addresses */
1935 + hw_dbg(hw, "Clearing RAR[1-%d]\n", uc_addr_in_use);
1936 + for (i = 1; i <= uc_addr_in_use; i++) {
1937 + IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
1938 + IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
1939 + }
1940 +
1941 + /* Add the new addresses */
1942 + for (i = 0; i < addr_count; i++) {
1943 + hw_dbg(hw, " Adding the secondary addresses:\n");
1944 + addr = next(hw, &addr_list, &vmdq);
1945 + ixgbe_add_uc_addr(hw, addr, vmdq);
1946 + }
1947 +
1948 + if (hw->addr_ctrl.overflow_promisc) {
1949 + /* enable promisc if not already in overflow or set by user */
1950 + if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
1951 + hw_dbg(hw, " Entering address overflow promisc mode\n");
1952 + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1953 + fctrl |= IXGBE_FCTRL_UPE;
1954 + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
1955 + }
1956 + } else {
1957 + /* only disable if set by overflow, not by user */
1958 + if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
1959 + hw_dbg(hw, " Leaving address overflow promisc mode\n");
1960 + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1961 + fctrl &= ~IXGBE_FCTRL_UPE;
1962 + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
1963 + }
1964 + }
1965 +
1966 + hw_dbg(hw, "ixgbe_update_uc_addr_list_generic Complete\n");
1967 return 0;
1968 }
1969
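/*
 * The ixgbe_mc_addr_itr type is declared elsewhere in this patch; from
 * the way `next` is called above it is a cursor-style iterator that
 * returns the current address, advances *mc_addr_ptr, and reports the
 * entry's VMDq pool. A plausible caller-supplied implementation for a
 * packed array of 6-byte addresses (the helper name is illustrative):
 */
static u8 *example_addr_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
{
	u8 *addr = *mc_addr_ptr;

	*mc_addr_ptr = addr + 6;	/* step to the next packed address */
	*vmdq = 0;			/* single pool in this sketch */
	return addr;
}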
1970 @@ -720,7 +1287,7 @@ static s32 ixgbe_init_rx_addrs(struct ix
1971 * bit-vector to set in the multicast table. The hardware uses 12 bits, from
1972 * incoming rx multicast addresses, to determine the bit-vector to check in
1973 * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
1974 - * by the MO field of the MCSTCTRL. The MO field is set during initalization
1975 + * by the MO field of the MCSTCTRL. The MO field is set during initialization
1976 * to mc_filter_type.
1977 **/
1978 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
1979 @@ -728,19 +1295,19 @@ static s32 ixgbe_mta_vector(struct ixgbe
1980 u32 vector = 0;
1981
1982 switch (hw->mac.mc_filter_type) {
1983 - case 0: /* use bits [47:36] of the address */
1984 + case 0: /* use bits [47:36] of the address */
1985 vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
1986 break;
1987 - case 1: /* use bits [46:35] of the address */
1988 + case 1: /* use bits [46:35] of the address */
1989 vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
1990 break;
1991 - case 2: /* use bits [45:34] of the address */
1992 + case 2: /* use bits [45:34] of the address */
1993 vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
1994 break;
1995 - case 3: /* use bits [43:32] of the address */
1996 + case 3: /* use bits [43:32] of the address */
1997 vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
1998 break;
1999 - default: /* Invalid mc_filter_type */
2000 + default: /* Invalid mc_filter_type */
2001 hw_dbg(hw, "MC filter type param set incorrectly\n");
2002 break;
2003 }
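/*
 * A standalone model of the hash above: twelve bits are taken from the
 * top two address bytes, with mc_filter_type selecting which 12-bit
 * window; the masked result indexes one bit of the 4096-bit MTA.
 */
#include <stdint.h>

static uint16_t mta_vector_model(int filter_type, const uint8_t mc[6])
{
	uint16_t vector = 0;

	switch (filter_type) {
	case 0:	vector = (mc[4] >> 4) | ((uint16_t)mc[5] << 4); break;
	case 1:	vector = (mc[4] >> 3) | ((uint16_t)mc[5] << 5); break;
	case 2:	vector = (mc[4] >> 2) | ((uint16_t)mc[5] << 6); break;
	case 3:	vector = mc[4] | ((uint16_t)mc[5] << 8); break;
	}

	return vector & 0xFFF;	/* 12-bit index into the multicast table */
}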
2004 @@ -794,21 +1361,22 @@ static void ixgbe_set_mta(struct ixgbe_h
2005 **/
2006 static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr)
2007 {
2008 - u32 rar_entries = hw->mac.num_rx_addrs;
2009 + u32 rar_entries = hw->mac.num_rar_entries;
2010 + u32 rar;
2011
2012 hw_dbg(hw, " MC Addr =%.2X %.2X %.2X %.2X %.2X %.2X\n",
2013 - mc_addr[0], mc_addr[1], mc_addr[2],
2014 - mc_addr[3], mc_addr[4], mc_addr[5]);
2015 + mc_addr[0], mc_addr[1], mc_addr[2],
2016 + mc_addr[3], mc_addr[4], mc_addr[5]);
2017
2018 /*
2019 * Place this multicast address in the RAR if there is room,
2020 * else put it in the MTA
2021 */
2022 if (hw->addr_ctrl.rar_used_count < rar_entries) {
2023 - ixgbe_set_rar(hw, hw->addr_ctrl.rar_used_count,
2024 - mc_addr, 0, IXGBE_RAH_AV);
2025 - hw_dbg(hw, "Added a multicast address to RAR[%d]\n",
2026 - hw->addr_ctrl.rar_used_count);
2027 + /* use RAR from the end up for multicast */
2028 + rar = rar_entries - hw->addr_ctrl.mc_addr_in_rar_count - 1;
2029 + hw->mac.ops.set_rar(hw, rar, mc_addr, 0, IXGBE_RAH_AV);
2030 + hw_dbg(hw, "Added a multicast address to RAR[%d]\n", rar);
2031 hw->addr_ctrl.rar_used_count++;
2032 hw->addr_ctrl.mc_addr_in_rar_count++;
2033 } else {
2034 @@ -819,22 +1387,23 @@ static void ixgbe_add_mc_addr(struct ixg
2035 }
2036
2037 /**
2038 - * ixgbe_update_mc_addr_list - Updates MAC list of multicast addresses
2039 + * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
2040 * @hw: pointer to hardware structure
2041 * @mc_addr_list: the list of new multicast addresses
2042 * @mc_addr_count: number of addresses
2043 - * @pad: number of bytes between addresses in the list
2044 + * @next: iterator function to walk the multicast address list
2045 *
2046 * The given list replaces any existing list. Clears the MC addrs from receive
2047 - * address registers and the multicast table. Uses unsed receive address
2048 + * address registers and the multicast table. Uses unused receive address
2049 * registers for the first multicast addresses, and hashes the rest into the
2050 * multicast table.
2051 **/
2052 -s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list,
2053 - u32 mc_addr_count, u32 pad)
2054 +s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
2055 + u32 mc_addr_count, ixgbe_mc_addr_itr next)
2056 {
2057 u32 i;
2058 - u32 rar_entries = hw->mac.num_rx_addrs;
2059 + u32 rar_entries = hw->mac.num_rar_entries;
2060 + u32 vmdq;
2061
2062 /*
2063 * Set the new number of MC addresses that we are being requested to
2064 @@ -846,7 +1415,8 @@ s32 ixgbe_update_mc_addr_list(struct ixg
2065 hw->addr_ctrl.mta_in_use = 0;
2066
2067 /* Zero out the other receive addresses. */
2068 - hw_dbg(hw, "Clearing RAR[1-15]\n");
2069 + hw_dbg(hw, "Clearing RAR[%d-%d]\n", hw->addr_ctrl.rar_used_count,
2070 + rar_entries - 1);
2071 for (i = hw->addr_ctrl.rar_used_count; i < rar_entries; i++) {
2072 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
2073 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
2074 @@ -854,186 +1424,67 @@ s32 ixgbe_update_mc_addr_list(struct ixg
2075
2076 /* Clear the MTA */
2077 hw_dbg(hw, " Clearing MTA\n");
2078 - for (i = 0; i < IXGBE_MC_TBL_SIZE; i++)
2079 + for (i = 0; i < hw->mac.mcft_size; i++)
2080 IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
2081
2082 /* Add the new addresses */
2083 for (i = 0; i < mc_addr_count; i++) {
2084 hw_dbg(hw, " Adding the multicast addresses:\n");
2085 - ixgbe_add_mc_addr(hw, mc_addr_list +
2086 - (i * (IXGBE_ETH_LENGTH_OF_ADDRESS + pad)));
2087 + ixgbe_add_mc_addr(hw, next(hw, &mc_addr_list, &vmdq));
2088 }
2089
2090 /* Enable mta */
2091 if (hw->addr_ctrl.mta_in_use > 0)
2092 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
2093 - IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
2094 + IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
2095
2096 - hw_dbg(hw, "ixgbe_update_mc_addr_list Complete\n");
2097 + hw_dbg(hw, "ixgbe_update_mc_addr_list_generic Complete\n");
2098 return 0;
2099 }
2100
2101 /**
2102 - * ixgbe_clear_vfta - Clear VLAN filter table
2103 + * ixgbe_enable_mc_generic - Enable multicast address in RAR
2104 * @hw: pointer to hardware structure
2105 *
2106 - * Clears the VLAN filer table, and the VMDq index associated with the filter
2107 + * Enables multicast address in RAR and the use of the multicast hash table.
2108 **/
2109 -static s32 ixgbe_clear_vfta(struct ixgbe_hw *hw)
2110 +s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
2111 {
2112 - u32 offset;
2113 - u32 vlanbyte;
2114 -
2115 - for (offset = 0; offset < IXGBE_VLAN_FILTER_TBL_SIZE; offset++)
2116 - IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
2117 -
2118 - for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
2119 - for (offset = 0; offset < IXGBE_VLAN_FILTER_TBL_SIZE; offset++)
2120 - IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
2121 - 0);
2122 -
2123 - return 0;
2124 -}
2125 + u32 i;
2126 + u32 rar_entries = hw->mac.num_rar_entries;
2127 + struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2128
2129 -/**
2130 - * ixgbe_set_vfta - Set VLAN filter table
2131 - * @hw: pointer to hardware structure
2132 - * @vlan: VLAN id to write to VLAN filter
2133 - * @vind: VMDq output index that maps queue to VLAN id in VFTA
2134 - * @vlan_on: boolean flag to turn on/off VLAN in VFTA
2135 - *
2136 - * Turn on/off specified VLAN in the VLAN filter table.
2137 - **/
2138 -s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind,
2139 - bool vlan_on)
2140 -{
2141 - u32 VftaIndex;
2142 - u32 BitOffset;
2143 - u32 VftaReg;
2144 - u32 VftaByte;
2145 -
2146 - /* Determine 32-bit word position in array */
2147 - VftaIndex = (vlan >> 5) & 0x7F; /* upper seven bits */
2148 -
2149 - /* Determine the location of the (VMD) queue index */
2150 - VftaByte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
2151 - BitOffset = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */
2152 -
2153 - /* Set the nibble for VMD queue index */
2154 - VftaReg = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(VftaByte, VftaIndex));
2155 - VftaReg &= (~(0x0F << BitOffset));
2156 - VftaReg |= (vind << BitOffset);
2157 - IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(VftaByte, VftaIndex), VftaReg);
2158 -
2159 - /* Determine the location of the bit for this VLAN id */
2160 - BitOffset = vlan & 0x1F; /* lower five bits */
2161 -
2162 - VftaReg = IXGBE_READ_REG(hw, IXGBE_VFTA(VftaIndex));
2163 - if (vlan_on)
2164 - /* Turn on this VLAN id */
2165 - VftaReg |= (1 << BitOffset);
2166 - else
2167 - /* Turn off this VLAN id */
2168 - VftaReg &= ~(1 << BitOffset);
2169 - IXGBE_WRITE_REG(hw, IXGBE_VFTA(VftaIndex), VftaReg);
2170 + if (a->mc_addr_in_rar_count > 0)
2171 + for (i = (rar_entries - a->mc_addr_in_rar_count);
2172 + i < rar_entries; i++)
2173 + ixgbe_enable_rar(hw, i);
2174 +
2175 + if (a->mta_in_use > 0)
2176 + IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
2177 + hw->mac.mc_filter_type);
2178
2179 return 0;
2180 }
2181
2182 /**
2183 - * ixgbe_setup_fc - Configure flow control settings
2184 + * ixgbe_disable_mc_generic - Disable multicast address in RAR
2185 * @hw: pointer to hardware structure
2186 - * @packetbuf_num: packet buffer number (0-7)
2187 *
2188 - * Configures the flow control settings based on SW configuration.
2189 - * This function is used for 802.3x flow control configuration only.
2190 + * Disables multicast address in RAR and the use of the multicast hash table.
2191 **/
2192 -s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
2193 +s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
2194 {
2195 - u32 frctl_reg;
2196 - u32 rmcs_reg;
2197 -
2198 - if (packetbuf_num < 0 || packetbuf_num > 7)
2199 - hw_dbg(hw, "Invalid packet buffer number [%d], expected range "
2200 - "is 0-7\n", packetbuf_num);
2201 -
2202 - frctl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2203 - frctl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
2204 -
2205 - rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
2206 - rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);
2207 -
2208 - /*
2209 - * We want to save off the original Flow Control configuration just in
2210 - * case we get disconnected and then reconnected into a different hub
2211 - * or switch with different Flow Control capabilities.
2212 - */
2213 - hw->fc.type = hw->fc.original_type;
2214 -
2215 - /*
2216 - * The possible values of the "flow_control" parameter are:
2217 - * 0: Flow control is completely disabled
2218 - * 1: Rx flow control is enabled (we can receive pause frames but not
2219 - * send pause frames).
2220 - * 2: Tx flow control is enabled (we can send pause frames but we do not
2221 - * support receiving pause frames)
2222 - * 3: Both Rx and TX flow control (symmetric) are enabled.
2223 - * other: Invalid.
2224 - */
2225 - switch (hw->fc.type) {
2226 - case ixgbe_fc_none:
2227 - break;
2228 - case ixgbe_fc_rx_pause:
2229 - /*
2230 - * RX Flow control is enabled,
2231 - * and TX Flow control is disabled.
2232 - */
2233 - frctl_reg |= IXGBE_FCTRL_RFCE;
2234 - break;
2235 - case ixgbe_fc_tx_pause:
2236 - /*
2237 - * TX Flow control is enabled, and RX Flow control is disabled,
2238 - * by a software over-ride.
2239 - */
2240 - rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
2241 - break;
2242 - case ixgbe_fc_full:
2243 - /*
2244 - * Flow control (both RX and TX) is enabled by a software
2245 - * over-ride.
2246 - */
2247 - frctl_reg |= IXGBE_FCTRL_RFCE;
2248 - rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
2249 - break;
2250 - default:
2251 - /* We should never get here. The value should be 0-3. */
2252 - hw_dbg(hw, "Flow control param set incorrectly\n");
2253 - break;
2254 - }
2255 -
2256 - /* Enable 802.3x based flow control settings. */
2257 - IXGBE_WRITE_REG(hw, IXGBE_FCTRL, frctl_reg);
2258 - IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
2259 + u32 i;
2260 + u32 rar_entries = hw->mac.num_rar_entries;
2261 + struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2262
2263 - /*
2264 - * We need to set up the Receive Threshold high and low water
2265 - * marks as well as (optionally) enabling the transmission of
2266 - * XON frames.
2267 - */
2268 - if (hw->fc.type & ixgbe_fc_tx_pause) {
2269 - if (hw->fc.send_xon) {
2270 - IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
2271 - (hw->fc.low_water | IXGBE_FCRTL_XONE));
2272 - } else {
2273 - IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
2274 - hw->fc.low_water);
2275 - }
2276 - IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num),
2277 - (hw->fc.high_water)|IXGBE_FCRTH_FCEN);
2278 - }
2279 + if (a->mc_addr_in_rar_count > 0)
2280 + for (i = (rar_entries - a->mc_addr_in_rar_count);
2281 + i < rar_entries; i++)
2282 + ixgbe_disable_rar(hw, i);
2283
2284 - IXGBE_WRITE_REG(hw, IXGBE_FCTTV(0), hw->fc.pause_time);
2285 - IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));
2286 + if (a->mta_in_use > 0)
2287 + IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2288
2289 return 0;
2290 }
2291 @@ -1049,13 +1500,24 @@ s32 ixgbe_setup_fc(struct ixgbe_hw *hw,
2292 **/
2293 s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
2294 {
2295 - u32 ctrl;
2296 - s32 i;
2297 + u32 i;
2298 + u32 reg_val;
2299 + u32 number_of_queues;
2300 s32 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
2301
2302 - ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
2303 - ctrl |= IXGBE_CTRL_GIO_DIS;
2304 - IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
2305 + /* Disable the receive unit by stopping each queue */
2306 + number_of_queues = hw->mac.max_rx_queues;
2307 + for (i = 0; i < number_of_queues; i++) {
2308 + reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
2309 + if (reg_val & IXGBE_RXDCTL_ENABLE) {
2310 + reg_val &= ~IXGBE_RXDCTL_ENABLE;
2311 + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
2312 + }
2313 + }
2314 +
2315 + reg_val = IXGBE_READ_REG(hw, IXGBE_CTRL);
2316 + reg_val |= IXGBE_CTRL_GIO_DIS;
2317 + IXGBE_WRITE_REG(hw, IXGBE_CTRL, reg_val);
2318
2319 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
2320 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) {
2321 @@ -1070,11 +1532,11 @@ s32 ixgbe_disable_pcie_master(struct ixg
2322
2323
2324 /**
2325 - * ixgbe_acquire_swfw_sync - Aquire SWFW semaphore
2326 + * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
2327 * @hw: pointer to hardware structure
2328 - * @mask: Mask to specify wich semaphore to acquire
2329 + * @mask: Mask to specify which semaphore to acquire
2330 *
2331 - * Aquires the SWFW semaphore throught the GSSR register for the specified
2332 + * Acquires the SWFW semaphore through the GSSR register for the specified
2333 * function (CSR, PHY0, PHY1, EEPROM, Flash)
2334 **/
2335 s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
2336 @@ -1116,9 +1578,9 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe
2337 /**
2338 * ixgbe_release_swfw_sync - Release SWFW semaphore
2339 * @hw: pointer to hardware structure
2340 - * @mask: Mask to specify wich semaphore to release
2341 + * @mask: Mask to specify which semaphore to release
2342 *
2343 - * Releases the SWFW semaphore throught the GSSR register for the specified
2344 + * Releases the SWFW semaphore through the GSSR register for the specified
2345 * function (CSR, PHY0, PHY1, EEPROM, Flash)
2346 **/
2347 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
2348 @@ -1135,45 +1597,3 @@ void ixgbe_release_swfw_sync(struct ixgb
2349 ixgbe_release_eeprom_semaphore(hw);
2350 }
2351
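/*
 * Sketch of the intended call pattern for the pair above: any access to
 * a resource shared with firmware (EEPROM, PHY, flash) is bracketed by
 * acquire/release on the matching GSSR mask. IXGBE_GSSR_EEP_SM is used
 * here on the assumption that it names the EEPROM semaphore bit.
 */
static s32 eeprom_op_locked(struct ixgbe_hw *hw)
{
	s32 status = ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM);

	if (status == 0) {
		/* ... perform the guarded EEPROM access here ... */
		ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
	}

	return status;
}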
2352 -/**
2353 - * ixgbe_read_analog_reg8 - Reads 8 bit Atlas analog register
2354 - * @hw: pointer to hardware structure
2355 - * @reg: analog register to read
2356 - * @val: read value
2357 - *
2358 - * Performs write operation to analog register specified.
2359 - **/
2360 -s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val)
2361 -{
2362 - u32 atlas_ctl;
2363 -
2364 - IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
2365 - IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
2366 - IXGBE_WRITE_FLUSH(hw);
2367 - udelay(10);
2368 - atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
2369 - *val = (u8)atlas_ctl;
2370 -
2371 - return 0;
2372 -}
2373 -
2374 -/**
2375 - * ixgbe_write_analog_reg8 - Writes 8 bit Atlas analog register
2376 - * @hw: pointer to hardware structure
2377 - * @reg: atlas register to write
2378 - * @val: value to write
2379 - *
2380 - * Performs write operation to Atlas analog register specified.
2381 - **/
2382 -s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val)
2383 -{
2384 - u32 atlas_ctl;
2385 -
2386 - atlas_ctl = (reg << 8) | val;
2387 - IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
2388 - IXGBE_WRITE_FLUSH(hw);
2389 - udelay(10);
2390 -
2391 - return 0;
2392 -}
2393 -
2394 --- a/drivers/net/ixgbe/ixgbe_common.h
2395 +++ b/drivers/net/ixgbe/ixgbe_common.h
2396 @@ -1,7 +1,7 @@
2397 /*******************************************************************************
2398
2399 Intel 10 Gigabit PCI Express Linux driver
2400 - Copyright(c) 1999 - 2007 Intel Corporation.
2401 + Copyright(c) 1999 - 2008 Intel Corporation.
2402
2403 This program is free software; you can redistribute it and/or modify it
2404 under the terms and conditions of the GNU General Public License,
2405 @@ -20,7 +20,6 @@
2406 the file called "COPYING".
2407
2408 Contact Information:
2409 - Linux NICS <linux.nics@intel.com>
2410 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
2411 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
2412
2413 @@ -31,34 +30,45 @@
2414
2415 #include "ixgbe_type.h"
2416
2417 -s32 ixgbe_init_hw(struct ixgbe_hw *hw);
2418 -s32 ixgbe_start_hw(struct ixgbe_hw *hw);
2419 -s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr);
2420 -s32 ixgbe_stop_adapter(struct ixgbe_hw *hw);
2421 -s32 ixgbe_read_part_num(struct ixgbe_hw *hw, u32 *part_num);
2422 -
2423 -s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index);
2424 -s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index);
2425 -
2426 -s32 ixgbe_init_eeprom(struct ixgbe_hw *hw);
2427 -s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data);
2428 -s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val);
2429 -
2430 -s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vind,
2431 - u32 enable_addr);
2432 -s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list,
2433 - u32 mc_addr_count, u32 pad);
2434 -s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on);
2435 -s32 ixgbe_validate_mac_addr(u8 *mac_addr);
2436 -
2437 -s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packtetbuf_num);
2438 +s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
2439 +s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
2440 +s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
2441 +s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
2442 +s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num);
2443 +s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
2444 +s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
2445 +s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw);
2446 +
2447 +s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index);
2448 +s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index);
2449 +
2450 +s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw);
2451 +s32 ixgbe_read_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
2452 +s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
2453 + u16 *data);
2454 +s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
2455 + u16 *checksum_val);
2456 +s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
2457 +
2458 +s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
2459 + u32 enable_addr);
2460 +s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
2461 +s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
2462 +s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
2463 + u32 mc_addr_count,
2464 + ixgbe_mc_addr_itr func);
2465 +s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
2466 + u32 addr_count, ixgbe_mc_addr_itr func);
2467 +s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
2468 +s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
2469
2470 +s32 ixgbe_validate_mac_addr(u8 *mac_addr);
2471 s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
2472 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask);
2473 s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
2474
2475 -s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val);
2476 -s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val);
2477 +s32 ixgbe_read_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 *val);
2478 +s32 ixgbe_write_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 val);
2479
2480 #define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
2481
2482 --- a/drivers/net/ixgbe/ixgbe_ethtool.c
2483 +++ b/drivers/net/ixgbe/ixgbe_ethtool.c
2484 @@ -1,7 +1,7 @@
2485 /*******************************************************************************
2486
2487 Intel 10 Gigabit PCI Express Linux driver
2488 - Copyright(c) 1999 - 2007 Intel Corporation.
2489 + Copyright(c) 1999 - 2008 Intel Corporation.
2490
2491 This program is free software; you can redistribute it and/or modify it
2492 under the terms and conditions of the GNU General Public License,
2493 @@ -20,7 +20,6 @@
2494 the file called "COPYING".
2495
2496 Contact Information:
2497 - Linux NICS <linux.nics@intel.com>
2498 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
2499 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
2500
2501 @@ -48,7 +47,7 @@ struct ixgbe_stats {
2502 };
2503
2504 #define IXGBE_STAT(m) sizeof(((struct ixgbe_adapter *)0)->m), \
2505 - offsetof(struct ixgbe_adapter, m)
2506 + offsetof(struct ixgbe_adapter, m)
2507 static struct ixgbe_stats ixgbe_gstrings_stats[] = {
2508 {"rx_packets", IXGBE_STAT(net_stats.rx_packets)},
2509 {"tx_packets", IXGBE_STAT(net_stats.tx_packets)},
2510 @@ -90,19 +89,22 @@ static struct ixgbe_stats ixgbe_gstrings
2511 {"rx_header_split", IXGBE_STAT(rx_hdr_split)},
2512 {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
2513 {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
2514 +#ifdef CONFIG_IXGBE_LRO
2515 {"lro_aggregated", IXGBE_STAT(lro_aggregated)},
2516 {"lro_flushed", IXGBE_STAT(lro_flushed)},
2517 +#endif
2518 };
2519
2520 #define IXGBE_QUEUE_STATS_LEN \
2521 - ((((struct ixgbe_adapter *)netdev->priv)->num_tx_queues + \
2522 - ((struct ixgbe_adapter *)netdev->priv)->num_rx_queues) * \
2523 - (sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
2524 -#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
2525 + ((((struct ixgbe_adapter *)netdev->priv)->num_tx_queues + \
2526 + ((struct ixgbe_adapter *)netdev->priv)->num_rx_queues) * \
2527 + (sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
2528 +#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
2529 +#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
2530 #define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
2531
2532 static int ixgbe_get_settings(struct net_device *netdev,
2533 - struct ethtool_cmd *ecmd)
2534 + struct ethtool_cmd *ecmd)
2535 {
2536 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2537 struct ixgbe_hw *hw = &adapter->hw;
2538 @@ -114,7 +116,7 @@ static int ixgbe_get_settings(struct net
2539 ecmd->transceiver = XCVR_EXTERNAL;
2540 if (hw->phy.media_type == ixgbe_media_type_copper) {
2541 ecmd->supported |= (SUPPORTED_1000baseT_Full |
2542 - SUPPORTED_TP | SUPPORTED_Autoneg);
2543 + SUPPORTED_TP | SUPPORTED_Autoneg);
2544
2545 ecmd->advertising = (ADVERTISED_TP | ADVERTISED_Autoneg);
2546 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
2547 @@ -126,14 +128,15 @@ static int ixgbe_get_settings(struct net
2548 } else {
2549 ecmd->supported |= SUPPORTED_FIBRE;
2550 ecmd->advertising = (ADVERTISED_10000baseT_Full |
2551 - ADVERTISED_FIBRE);
2552 + ADVERTISED_FIBRE);
2553 ecmd->port = PORT_FIBRE;
2554 + ecmd->autoneg = AUTONEG_DISABLE;
2555 }
2556
2557 - adapter->hw.mac.ops.check_link(hw, &(link_speed), &link_up);
2558 + hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
2559 if (link_up) {
2560 ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
2561 - SPEED_10000 : SPEED_1000;
2562 + SPEED_10000 : SPEED_1000;
2563 ecmd->duplex = DUPLEX_FULL;
2564 } else {
2565 ecmd->speed = -1;
2566 @@ -144,7 +147,7 @@ static int ixgbe_get_settings(struct net
2567 }
2568
2569 static int ixgbe_set_settings(struct net_device *netdev,
2570 - struct ethtool_cmd *ecmd)
2571 + struct ethtool_cmd *ecmd)
2572 {
2573 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2574 struct ixgbe_hw *hw = &adapter->hw;
2575 @@ -164,7 +167,7 @@ static int ixgbe_set_settings(struct net
2576 }
2577
2578 static void ixgbe_get_pauseparam(struct net_device *netdev,
2579 - struct ethtool_pauseparam *pause)
2580 + struct ethtool_pauseparam *pause)
2581 {
2582 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2583 struct ixgbe_hw *hw = &adapter->hw;
2584 @@ -182,7 +185,7 @@ static void ixgbe_get_pauseparam(struct
2585 }
2586
2587 static int ixgbe_set_pauseparam(struct net_device *netdev,
2588 - struct ethtool_pauseparam *pause)
2589 + struct ethtool_pauseparam *pause)
2590 {
2591 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2592 struct ixgbe_hw *hw = &adapter->hw;
2593 @@ -233,15 +236,15 @@ static int ixgbe_set_rx_csum(struct net_
2594
2595 static u32 ixgbe_get_tx_csum(struct net_device *netdev)
2596 {
2597 - return (netdev->features & NETIF_F_HW_CSUM) != 0;
2598 + return (netdev->features & NETIF_F_IP_CSUM) != 0;
2599 }
2600
2601 static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data)
2602 {
2603 if (data)
2604 - netdev->features |= NETIF_F_HW_CSUM;
2605 + netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
2606 else
2607 - netdev->features &= ~NETIF_F_HW_CSUM;
2608 + netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
2609
2610 return 0;
2611 }
2612 @@ -281,7 +284,7 @@ static int ixgbe_get_regs_len(struct net
2613 #define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_
2614
2615 static void ixgbe_get_regs(struct net_device *netdev,
2616 - struct ethtool_regs *regs, void *p)
2617 + struct ethtool_regs *regs, void *p)
2618 {
2619 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2620 struct ixgbe_hw *hw = &adapter->hw;
2621 @@ -315,7 +318,9 @@ static void ixgbe_get_regs(struct net_de
2622 regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC);
2623
2624 /* Interrupt */
2625 - regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICR);
2626 + /* don't read EICR because it can clear interrupt causes, instead
2627 + * read EICS which is a shadow but doesn't clear EICR */
2628 + regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS);
2629 regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS);
2630 regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS);
2631 regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC);
2632 @@ -325,7 +330,7 @@ static void ixgbe_get_regs(struct net_de
2633 regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
2634 regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
2635 regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
2636 - regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL);
2637 + regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0));
2638 regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);
2639
2640 /* Flow Control */
2641 @@ -371,7 +376,7 @@ static void ixgbe_get_regs(struct net_de
2642 regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i));
2643 for (i = 0; i < 16; i++)
2644 regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i));
2645 - regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE);
2646 + regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0));
2647 regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2648 regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2649 regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL);
2650 @@ -419,7 +424,6 @@ static void ixgbe_get_regs(struct net_de
2651 regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
2652 regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT);
2653
2654 - /* DCE */
2655 regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);
2656 regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
2657 regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
2658 @@ -539,21 +543,17 @@ static void ixgbe_get_regs(struct net_de
2659 /* Diagnostic */
2660 regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL);
2661 for (i = 0; i < 8; i++)
2662 - regs_buff[1072] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
2663 + regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
2664 regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN);
2665 - regs_buff[1081] = IXGBE_READ_REG(hw, IXGBE_RIC_DW0);
2666 - regs_buff[1082] = IXGBE_READ_REG(hw, IXGBE_RIC_DW1);
2667 - regs_buff[1083] = IXGBE_READ_REG(hw, IXGBE_RIC_DW2);
2668 - regs_buff[1084] = IXGBE_READ_REG(hw, IXGBE_RIC_DW3);
2669 + for (i = 0; i < 4; i++)
2670 + regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i));
2671 regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE);
2672 regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL);
2673 for (i = 0; i < 8; i++)
2674 - regs_buff[1087] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
2675 + regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
2676 regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN);
2677 - regs_buff[1096] = IXGBE_READ_REG(hw, IXGBE_TIC_DW0);
2678 - regs_buff[1097] = IXGBE_READ_REG(hw, IXGBE_TIC_DW1);
2679 - regs_buff[1098] = IXGBE_READ_REG(hw, IXGBE_TIC_DW2);
2680 - regs_buff[1099] = IXGBE_READ_REG(hw, IXGBE_TIC_DW3);
2681 + for (i = 0; i < 4; i++)
2682 + regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
2683 regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
2684 regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
2685 regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0);
2686 @@ -566,7 +566,7 @@ static void ixgbe_get_regs(struct net_de
2687 regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2);
2688 regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3);
2689 for (i = 0; i < 8; i++)
2690 - regs_buff[1111] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
2691 + regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
2692 regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
2693 regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1);
2694 regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2);
2695 @@ -585,7 +585,7 @@ static int ixgbe_get_eeprom_len(struct n
2696 }
2697
2698 static int ixgbe_get_eeprom(struct net_device *netdev,
2699 - struct ethtool_eeprom *eeprom, u8 *bytes)
2700 + struct ethtool_eeprom *eeprom, u8 *bytes)
2701 {
2702 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2703 struct ixgbe_hw *hw = &adapter->hw;
2704 @@ -608,8 +608,8 @@ static int ixgbe_get_eeprom(struct net_d
2705 return -ENOMEM;
2706
2707 for (i = 0; i < eeprom_len; i++) {
2708 - if ((ret_val = ixgbe_read_eeprom(hw, first_word + i,
2709 - &eeprom_buff[i])))
2710 + if ((ret_val = hw->eeprom.ops.read(hw, first_word + i,
2711 + &eeprom_buff[i])))
2712 break;
2713 }
2714
2715 @@ -624,7 +624,7 @@ static int ixgbe_get_eeprom(struct net_d
2716 }
2717
2718 static void ixgbe_get_drvinfo(struct net_device *netdev,
2719 - struct ethtool_drvinfo *drvinfo)
2720 + struct ethtool_drvinfo *drvinfo)
2721 {
2722 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2723
2724 @@ -637,7 +637,7 @@ static void ixgbe_get_drvinfo(struct net
2725 }
2726
2727 static void ixgbe_get_ringparam(struct net_device *netdev,
2728 - struct ethtool_ringparam *ring)
2729 + struct ethtool_ringparam *ring)
2730 {
2731 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2732 struct ixgbe_ring *tx_ring = adapter->tx_ring;
2733 @@ -654,15 +654,12 @@ static void ixgbe_get_ringparam(struct n
2734 }
2735
2736 static int ixgbe_set_ringparam(struct net_device *netdev,
2737 - struct ethtool_ringparam *ring)
2738 + struct ethtool_ringparam *ring)
2739 {
2740 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2741 - struct ixgbe_tx_buffer *old_buf;
2742 - struct ixgbe_rx_buffer *old_rx_buf;
2743 - void *old_desc;
2744 + struct ixgbe_ring *temp_ring;
2745 int i, err;
2746 - u32 new_rx_count, new_tx_count, old_size;
2747 - dma_addr_t old_dma;
2748 + u32 new_rx_count, new_tx_count;
2749
2750 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
2751 return -EINVAL;
2752 @@ -681,6 +678,15 @@ static int ixgbe_set_ringparam(struct ne
2753 return 0;
2754 }
2755
2756 + if (adapter->num_tx_queues > adapter->num_rx_queues)
2757 + temp_ring = vmalloc(adapter->num_tx_queues *
2758 + sizeof(struct ixgbe_ring));
2759 + else
2760 + temp_ring = vmalloc(adapter->num_rx_queues *
2761 + sizeof(struct ixgbe_ring));
2762 + if (!temp_ring)
2763 + return -ENOMEM;
2764 +
2765 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
2766 msleep(1);
2767
2768 @@ -693,66 +699,61 @@ static int ixgbe_set_ringparam(struct ne
2769 * to the tx and rx ring structs.
2770 */
2771 if (new_tx_count != adapter->tx_ring->count) {
2772 + memcpy(temp_ring, adapter->tx_ring,
2773 + adapter->num_tx_queues * sizeof(struct ixgbe_ring));
2774 +
2775 for (i = 0; i < adapter->num_tx_queues; i++) {
2776 - /* Save existing descriptor ring */
2777 - old_buf = adapter->tx_ring[i].tx_buffer_info;
2778 - old_desc = adapter->tx_ring[i].desc;
2779 - old_size = adapter->tx_ring[i].size;
2780 - old_dma = adapter->tx_ring[i].dma;
2781 - /* Try to allocate a new one */
2782 - adapter->tx_ring[i].tx_buffer_info = NULL;
2783 - adapter->tx_ring[i].desc = NULL;
2784 - adapter->tx_ring[i].count = new_tx_count;
2785 - err = ixgbe_setup_tx_resources(adapter,
2786 - &adapter->tx_ring[i]);
2787 + temp_ring[i].count = new_tx_count;
2788 + err = ixgbe_setup_tx_resources(adapter, &temp_ring[i]);
2789 if (err) {
2790 - /* Restore the old one so at least
2791 - the adapter still works, even if
2792 - we failed the request */
2793 - adapter->tx_ring[i].tx_buffer_info = old_buf;
2794 - adapter->tx_ring[i].desc = old_desc;
2795 - adapter->tx_ring[i].size = old_size;
2796 - adapter->tx_ring[i].dma = old_dma;
2797 + while (i) {
2798 + i--;
2799 + ixgbe_free_tx_resources(adapter,
2800 + &temp_ring[i]);
2801 + }
2802 goto err_setup;
2803 }
2804 - /* Free the old buffer manually */
2805 - vfree(old_buf);
2806 - pci_free_consistent(adapter->pdev, old_size,
2807 - old_desc, old_dma);
2808 }
2809 +
2810 + for (i = 0; i < adapter->num_tx_queues; i++)
2811 + ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]);
2812 +
2813 + memcpy(adapter->tx_ring, temp_ring,
2814 + adapter->num_tx_queues * sizeof(struct ixgbe_ring));
2815 +
2816 + adapter->tx_ring_count = new_tx_count;
2817 }
2818
2819 if (new_rx_count != adapter->rx_ring->count) {
2820 - for (i = 0; i < adapter->num_rx_queues; i++) {
2821 + memcpy(temp_ring, adapter->rx_ring,
2822 + adapter->num_rx_queues * sizeof(struct ixgbe_ring));
2823
2824 - old_rx_buf = adapter->rx_ring[i].rx_buffer_info;
2825 - old_desc = adapter->rx_ring[i].desc;
2826 - old_size = adapter->rx_ring[i].size;
2827 - old_dma = adapter->rx_ring[i].dma;
2828 -
2829 - adapter->rx_ring[i].rx_buffer_info = NULL;
2830 - adapter->rx_ring[i].desc = NULL;
2831 - adapter->rx_ring[i].dma = 0;
2832 - adapter->rx_ring[i].count = new_rx_count;
2833 - err = ixgbe_setup_rx_resources(adapter,
2834 - &adapter->rx_ring[i]);
2835 + for (i = 0; i < adapter->num_rx_queues; i++) {
2836 + temp_ring[i].count = new_rx_count;
2837 + err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]);
2838 if (err) {
2839 - adapter->rx_ring[i].rx_buffer_info = old_rx_buf;
2840 - adapter->rx_ring[i].desc = old_desc;
2841 - adapter->rx_ring[i].size = old_size;
2842 - adapter->rx_ring[i].dma = old_dma;
2843 + while (i) {
2844 + i--;
2845 + ixgbe_free_rx_resources(adapter,
2846 + &temp_ring[i]);
2847 + }
2848 goto err_setup;
2849 }
2850 -
2851 - vfree(old_rx_buf);
2852 - pci_free_consistent(adapter->pdev, old_size, old_desc,
2853 - old_dma);
2854 }
2855 +
2856 + for (i = 0; i < adapter->num_rx_queues; i++)
2857 + ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]);
2858 +
2859 + memcpy(adapter->rx_ring, temp_ring,
2860 + adapter->num_rx_queues * sizeof(struct ixgbe_ring));
2861 +
2862 + adapter->rx_ring_count = new_rx_count;
2863 }
2864
2865 + /* success! */
2866 err = 0;
2867 err_setup:
2868 - if (netif_running(adapter->netdev))
2869 + if (netif_running(netdev))
2870 ixgbe_up(adapter);
2871
2872 clear_bit(__IXGBE_RESETTING, &adapter->state);
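/*
 * The allocate-then-swap pattern used above, in general form (struct
 * and helper names here are hypothetical): the new descriptor rings are
 * built in a scratch array first, so a mid-loop allocation failure can
 * unwind without touching the rings the adapter is still using; only
 * after every allocation succeeds are the old rings freed and the
 * scratch array copied over them.
 */
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/errno.h>

struct ring { int count; /* plus descriptors, DMA handles, ... */ };

int setup_resources(struct ring *r);	/* hypothetical helpers */
void free_resources(struct ring *r);

static int resize_rings(struct ring *rings, int n, int new_count)
{
	struct ring *tmp = vmalloc(n * sizeof(*tmp));
	int i, err;

	if (!tmp)
		return -ENOMEM;
	memcpy(tmp, rings, n * sizeof(*tmp));

	for (i = 0; i < n; i++) {
		tmp[i].count = new_count;
		err = setup_resources(&tmp[i]);
		if (err) {
			while (i--)		/* unwind; old rings untouched */
				free_resources(&tmp[i]);
			vfree(tmp);
			return err;
		}
	}

	for (i = 0; i < n; i++)
		free_resources(&rings[i]);
	memcpy(rings, tmp, n * sizeof(*tmp));
	vfree(tmp);
	return 0;
}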
2873 @@ -770,20 +771,31 @@ static int ixgbe_get_sset_count(struct n
2874 }
2875
2876 static void ixgbe_get_ethtool_stats(struct net_device *netdev,
2877 - struct ethtool_stats *stats, u64 *data)
2878 + struct ethtool_stats *stats, u64 *data)
2879 {
2880 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2881 u64 *queue_stat;
2882 int stat_count = sizeof(struct ixgbe_queue_stats) / sizeof(u64);
2883 int j, k;
2884 int i;
2885 +
2886 +#ifdef CONFIG_IXGBE_LRO
2887 u64 aggregated = 0, flushed = 0, no_desc = 0;
2888 + for (i = 0; i < adapter->num_rx_queues; i++) {
2889 + aggregated += adapter->rx_ring[i].lro_mgr.stats.aggregated;
2890 + flushed += adapter->rx_ring[i].lro_mgr.stats.flushed;
2891 + no_desc += adapter->rx_ring[i].lro_mgr.stats.no_desc;
2892 + }
2893 + adapter->lro_aggregated = aggregated;
2894 + adapter->lro_flushed = flushed;
2895 + adapter->lro_no_desc = no_desc;
2896 +#endif
2897
2898 ixgbe_update_stats(adapter);
2899 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
2900 char *p = (char *)adapter + ixgbe_gstrings_stats[i].stat_offset;
2901 data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
2902 - sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
2903 + sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
2904 }
2905 for (j = 0; j < adapter->num_tx_queues; j++) {
2906 queue_stat = (u64 *)&adapter->tx_ring[j].stats;
2907 @@ -792,24 +804,18 @@ static void ixgbe_get_ethtool_stats(stru
2908 i += k;
2909 }
2910 for (j = 0; j < adapter->num_rx_queues; j++) {
2911 - aggregated += adapter->rx_ring[j].lro_mgr.stats.aggregated;
2912 - flushed += adapter->rx_ring[j].lro_mgr.stats.flushed;
2913 - no_desc += adapter->rx_ring[j].lro_mgr.stats.no_desc;
2914 queue_stat = (u64 *)&adapter->rx_ring[j].stats;
2915 for (k = 0; k < stat_count; k++)
2916 data[i + k] = queue_stat[k];
2917 i += k;
2918 }
2919 - adapter->lro_aggregated = aggregated;
2920 - adapter->lro_flushed = flushed;
2921 - adapter->lro_no_desc = no_desc;
2922 }
2923
2924 static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
2925 - u8 *data)
2926 + u8 *data)
2927 {
2928 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2929 - u8 *p = data;
2930 + char *p = (char *)data;
2931 int i;
2932
2933 switch (stringset) {
2934 @@ -831,14 +837,14 @@ static void ixgbe_get_strings(struct net
2935 sprintf(p, "rx_queue_%u_bytes", i);
2936 p += ETH_GSTRING_LEN;
2937 }
2938 -/* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
2939 + /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
2940 break;
2941 }
2942 }
2943
2944
2945 static void ixgbe_get_wol(struct net_device *netdev,
2946 - struct ethtool_wolinfo *wol)
2947 + struct ethtool_wolinfo *wol)
2948 {
2949 wol->supported = 0;
2950 wol->wolopts = 0;
2951 @@ -859,16 +865,17 @@ static int ixgbe_nway_reset(struct net_d
2952 static int ixgbe_phys_id(struct net_device *netdev, u32 data)
2953 {
2954 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2955 - u32 led_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_LEDCTL);
2956 + struct ixgbe_hw *hw = &adapter->hw;
2957 + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2958 u32 i;
2959
2960 if (!data || data > 300)
2961 data = 300;
2962
2963 for (i = 0; i < (data * 1000); i += 400) {
2964 - ixgbe_led_on(&adapter->hw, IXGBE_LED_ON);
2965 + hw->mac.ops.led_on(hw, IXGBE_LED_ON);
2966 msleep_interruptible(200);
2967 - ixgbe_led_off(&adapter->hw, IXGBE_LED_ON);
2968 + hw->mac.ops.led_off(hw, IXGBE_LED_ON);
2969 msleep_interruptible(200);
2970 }
2971
2972 @@ -879,67 +886,75 @@ static int ixgbe_phys_id(struct net_devi
2973 }
2974
2975 static int ixgbe_get_coalesce(struct net_device *netdev,
2976 - struct ethtool_coalesce *ec)
2977 + struct ethtool_coalesce *ec)
2978 {
2979 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2980
2981 - if (adapter->rx_eitr < IXGBE_MIN_ITR_USECS)
2982 - ec->rx_coalesce_usecs = adapter->rx_eitr;
2983 - else
2984 - ec->rx_coalesce_usecs = 1000000 / adapter->rx_eitr;
2985 -
2986 - if (adapter->tx_eitr < IXGBE_MIN_ITR_USECS)
2987 - ec->tx_coalesce_usecs = adapter->tx_eitr;
2988 - else
2989 - ec->tx_coalesce_usecs = 1000000 / adapter->tx_eitr;
2990 -
2991 ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0].work_limit;
2992 +
2993 + /* only valid if in constant ITR mode */
2994 + switch (adapter->itr_setting) {
2995 + case 0:
2996 + /* throttling disabled */
2997 + ec->rx_coalesce_usecs = 0;
2998 + break;
2999 + case 1:
3000 + /* dynamic ITR mode */
3001 + ec->rx_coalesce_usecs = 1;
3002 + break;
3003 + default:
3004 + /* fixed interrupt rate mode */
3005 + ec->rx_coalesce_usecs = 1000000/adapter->eitr_param;
3006 + break;
3007 + }
3008 return 0;
3009 }
3010
3011 static int ixgbe_set_coalesce(struct net_device *netdev,
3012 - struct ethtool_coalesce *ec)
3013 + struct ethtool_coalesce *ec)
3014 {
3015 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3016 -
3017 - if ((ec->rx_coalesce_usecs > IXGBE_MAX_ITR_USECS) ||
3018 - ((ec->rx_coalesce_usecs != 0) &&
3019 - (ec->rx_coalesce_usecs != 1) &&
3020 - (ec->rx_coalesce_usecs != 3) &&
3021 - (ec->rx_coalesce_usecs < IXGBE_MIN_ITR_USECS)))
3022 - return -EINVAL;
3023 - if ((ec->tx_coalesce_usecs > IXGBE_MAX_ITR_USECS) ||
3024 - ((ec->tx_coalesce_usecs != 0) &&
3025 - (ec->tx_coalesce_usecs != 1) &&
3026 - (ec->tx_coalesce_usecs != 3) &&
3027 - (ec->tx_coalesce_usecs < IXGBE_MIN_ITR_USECS)))
3028 - return -EINVAL;
3029 -
3030 - /* convert to rate of irq's per second */
3031 - if (ec->rx_coalesce_usecs < IXGBE_MIN_ITR_USECS)
3032 - adapter->rx_eitr = ec->rx_coalesce_usecs;
3033 - else
3034 - adapter->rx_eitr = (1000000 / ec->rx_coalesce_usecs);
3035 -
3036 - if (ec->tx_coalesce_usecs < IXGBE_MIN_ITR_USECS)
3037 - adapter->tx_eitr = ec->rx_coalesce_usecs;
3038 - else
3039 - adapter->tx_eitr = (1000000 / ec->tx_coalesce_usecs);
3040 + struct ixgbe_hw *hw = &adapter->hw;
3041 + int i;
3042
3043 if (ec->tx_max_coalesced_frames_irq)
3044 - adapter->tx_ring[0].work_limit =
3045 - ec->tx_max_coalesced_frames_irq;
3046 + adapter->tx_ring[0].work_limit = ec->tx_max_coalesced_frames_irq;
3047
3048 - if (netif_running(netdev)) {
3049 - ixgbe_down(adapter);
3050 - ixgbe_up(adapter);
3051 + if (ec->rx_coalesce_usecs > 1) {
3052 + /* store the value in ints/second */
3053 + adapter->eitr_param = 1000000/ec->rx_coalesce_usecs;
3054 +
3055 + /* static value of interrupt rate */
3056 + adapter->itr_setting = adapter->eitr_param;
3057 + /* clear the lower bit */
3058 + adapter->itr_setting &= ~1;
3059 + } else if (ec->rx_coalesce_usecs == 1) {
3060 + /* 1 means dynamic mode */
3061 + adapter->eitr_param = 20000;
3062 + adapter->itr_setting = 1;
3063 + } else {
3064 + /* any other value means disable eitr, which is best
3065 + * served by setting the interrupt rate very high */
3066 + adapter->eitr_param = 3000000;
3067 + adapter->itr_setting = 0;
3068 + }
3069 +
3070 + for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
3071 + struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
3072 + if (q_vector->txr_count && !q_vector->rxr_count)
3073 + q_vector->eitr = (adapter->eitr_param >> 1);
3074 + else
3075 + /* rx only or mixed */
3076 + q_vector->eitr = adapter->eitr_param;
3077 + IXGBE_WRITE_REG(hw, IXGBE_EITR(i),
3078 + EITR_INTS_PER_SEC_TO_REG(q_vector->eitr));
3079 }
3080
3081 return 0;
3082 }
3083
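/*
 * The coalescing convention shared by the get/set pair above, as a pure
 * function: rx_coalesce_usecs == 0 disables throttling (modeled as a
 * very high rate), == 1 selects dynamic ITR (20000 ints/sec starting
 * rate), and any larger value is a fixed period converted to interrupts
 * per second.
 */
#include <stdint.h>

static uint32_t rx_usecs_to_eitr_param(uint32_t usecs)
{
	if (usecs == 0)
		return 3000000;		/* throttling effectively disabled */
	if (usecs == 1)
		return 20000;		/* dynamic ITR default */

	return 1000000 / usecs;		/* fixed interrupts per second */
}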
3084
3085 -static struct ethtool_ops ixgbe_ethtool_ops = {
3086 +static const struct ethtool_ops ixgbe_ethtool_ops = {
3087 .get_settings = ixgbe_get_settings,
3088 .set_settings = ixgbe_set_settings,
3089 .get_drvinfo = ixgbe_get_drvinfo,
3090 @@ -966,7 +981,7 @@ static struct ethtool_ops ixgbe_ethtool_
3091 .set_tso = ixgbe_set_tso,
3092 .get_strings = ixgbe_get_strings,
3093 .phys_id = ixgbe_phys_id,
3094 - .get_sset_count = ixgbe_get_sset_count,
3095 + .get_sset_count = ixgbe_get_sset_count,
3096 .get_ethtool_stats = ixgbe_get_ethtool_stats,
3097 .get_coalesce = ixgbe_get_coalesce,
3098 .set_coalesce = ixgbe_set_coalesce,
3099 --- a/drivers/net/ixgbe/ixgbe.h
3100 +++ b/drivers/net/ixgbe/ixgbe.h
3101 @@ -1,7 +1,7 @@
3102 /*******************************************************************************
3103
3104 Intel 10 Gigabit PCI Express Linux driver
3105 - Copyright(c) 1999 - 2007 Intel Corporation.
3106 + Copyright(c) 1999 - 2008 Intel Corporation.
3107
3108 This program is free software; you can redistribute it and/or modify it
3109 under the terms and conditions of the GNU General Public License,
3110 @@ -20,7 +20,6 @@
3111 the file called "COPYING".
3112
3113 Contact Information:
3114 - Linux NICS <linux.nics@intel.com>
3115 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
3116 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
3117
3118 @@ -32,17 +31,20 @@
3119 #include <linux/types.h>
3120 #include <linux/pci.h>
3121 #include <linux/netdevice.h>
3122 +
3123 +#ifdef CONFIG_IXGBE_LRO
3124 #include <linux/inet_lro.h>
3125 +#define IXGBE_MAX_LRO_AGGREGATE 32
3126 +#define IXGBE_MAX_LRO_DESCRIPTORS 8
3127 +#endif
3128
3129 #include "ixgbe_type.h"
3130 #include "ixgbe_common.h"
3131
3132 -#ifdef CONFIG_DCA
3133 +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
3134 #include <linux/dca.h>
3135 #endif
3136
3137 -#define IXGBE_ERR(args...) printk(KERN_ERR "ixgbe: " args)
3138 -
3139 #define PFX "ixgbe: "
3140 #define DPRINTK(nlevel, klevel, fmt, args...) \
3141 ((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
3142 @@ -58,23 +60,14 @@
3143 #define IXGBE_MAX_RXD 4096
3144 #define IXGBE_MIN_RXD 64
3145
3146 -#define IXGBE_DEFAULT_RXQ 1
3147 -#define IXGBE_MAX_RXQ 1
3148 -#define IXGBE_MIN_RXQ 1
3149 -
3150 -#define IXGBE_DEFAULT_ITR_RX_USECS 125 /* 8k irqs/sec */
3151 -#define IXGBE_DEFAULT_ITR_TX_USECS 250 /* 4k irqs/sec */
3152 -#define IXGBE_MIN_ITR_USECS 100 /* 500k irqs/sec */
3153 -#define IXGBE_MAX_ITR_USECS 10000 /* 100 irqs/sec */
3154 -
3155 /* flow control */
3156 #define IXGBE_DEFAULT_FCRTL 0x10000
3157 -#define IXGBE_MIN_FCRTL 0
3158 +#define IXGBE_MIN_FCRTL 0x40
3159 #define IXGBE_MAX_FCRTL 0x7FF80
3160 #define IXGBE_DEFAULT_FCRTH 0x20000
3161 -#define IXGBE_MIN_FCRTH 0
3162 +#define IXGBE_MIN_FCRTH 0x600
3163 #define IXGBE_MAX_FCRTH 0x7FFF0
3164 -#define IXGBE_DEFAULT_FCPAUSE 0x6800 /* may be too long */
3165 +#define IXGBE_DEFAULT_FCPAUSE 0xFFFF
3166 #define IXGBE_MIN_FCPAUSE 0
3167 #define IXGBE_MAX_FCPAUSE 0xFFFF
3168
3169 @@ -88,9 +81,6 @@
3170
3171 #define MAXIMUM_ETHERNET_VLAN_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
3172
3173 -/* How many Tx Descriptors do we need to call netif_wake_queue? */
3174 -#define IXGBE_TX_QUEUE_WAKE 16
3175 -
3176 /* How many Rx Buffers do we bundle into one write to the hardware ? */
3177 #define IXGBE_RX_BUFFER_WRITE 16 /* Must be power of 2 */
3178
3179 @@ -101,9 +91,6 @@
3180 #define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
3181 #define IXGBE_TX_FLAGS_VLAN_SHIFT 16
3182
3183 -#define IXGBE_MAX_LRO_DESCRIPTORS 8
3184 -#define IXGBE_MAX_LRO_AGGREGATE 32
3185 -
3186 /* wrapper around a pointer to a socket buffer,
3187 * so a DMA handle can be stored along with the buffer */
3188 struct ixgbe_tx_buffer {
3189 @@ -119,6 +106,7 @@ struct ixgbe_rx_buffer {
3190 dma_addr_t dma;
3191 struct page *page;
3192 dma_addr_t page_dma;
3193 + unsigned int page_offset;
3194 };
3195
3196 struct ixgbe_queue_stats {
3197 @@ -150,22 +138,22 @@ struct ixgbe_ring {
3198 * offset associated with this ring, which is different
3199 * for DCE and RSS modes */
3200
3201 -#ifdef CONFIG_DCA
3202 +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
3203 /* cpu for tx queue */
3204 int cpu;
3205 #endif
3206 +#ifdef CONFIG_IXGBE_LRO
3207 struct net_lro_mgr lro_mgr;
3208 bool lro_used;
3209 +#endif
3210 struct ixgbe_queue_stats stats;
3211 - u8 v_idx; /* maps directly to the index for this ring in the hardware
3212 - * vector array, can also be used for finding the bit in EICR
3213 - * and friends that represents the vector for this ring */
3214 + u16 v_idx; /* maps directly to the index for this ring in the hardware
3215 + * vector array, can also be used for finding the bit in EICR
3216 + * and friends that represents the vector for this ring */
3217
3218 - u32 eims_value;
3219 - u16 itr_register;
3220
3221 - char name[IFNAMSIZ + 5];
3222 u16 work_limit; /* max work per interrupt */
3223 + u16 rx_buf_len;
3224 };
3225
3226 #define RING_F_VMDQ 1
3227 @@ -190,8 +178,8 @@ struct ixgbe_q_vector {
3228 DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */
3229 u8 rxr_count; /* Rx ring count assigned to this vector */
3230 u8 txr_count; /* Tx ring count assigned to this vector */
3231 - u8 tx_eitr;
3232 - u8 rx_eitr;
3233 + u8 tx_itr;
3234 + u8 rx_itr;
3235 u32 eitr;
3236 };
3237
3238 @@ -228,7 +216,6 @@ struct ixgbe_adapter {
3239 struct timer_list watchdog_timer;
3240 struct vlan_group *vlgrp;
3241 u16 bd_number;
3242 - u16 rx_buf_len;
3243 struct work_struct reset_task;
3244 struct ixgbe_q_vector q_vector[MAX_MSIX_Q_VECTORS];
3245 char name[MAX_MSIX_COUNT][IFNAMSIZ + 5];
3246 @@ -240,7 +227,9 @@ struct ixgbe_adapter {
3247
3248 /* TX */
3249 struct ixgbe_ring *tx_ring; /* One per active queue */
3250 + int num_tx_queues;
3251 u64 restart_queue;
3252 + u64 hw_csum_tx_good;
3253 u64 lsc_int;
3254 u64 hw_tso_ctxt;
3255 u64 hw_tso6_ctxt;
3256 @@ -249,12 +238,10 @@ struct ixgbe_adapter {
3257
3258 /* RX */
3259 struct ixgbe_ring *rx_ring; /* One per active queue */
3260 - u64 hw_csum_tx_good;
3261 + int num_rx_queues;
3262 u64 hw_csum_rx_error;
3263 u64 hw_csum_rx_good;
3264 u64 non_eop_descs;
3265 - int num_tx_queues;
3266 - int num_rx_queues;
3267 int num_msix_vectors;
3268 struct ixgbe_ring_feature ring_feature[3];
3269 struct msix_entry *msix_entries;
3270 @@ -267,15 +254,28 @@ struct ixgbe_adapter {
3271 * thus the additional *_CAPABLE flags.
3272 */
3273 u32 flags;
3274 -#define IXGBE_FLAG_RX_CSUM_ENABLED (u32)(1 << 0)
3275 -#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 1)
3276 -#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 2)
3277 -#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 3)
3278 -#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 4)
3279 -#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 5)
3280 -#define IXGBE_FLAG_RSS_ENABLED (u32)(1 << 6)
3281 -#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 7)
3282 -#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 8)
3283 +#define IXGBE_FLAG_RX_CSUM_ENABLED (u32)(1)
3284 +#define IXGBE_FLAG_MSI_CAPABLE (u32)(1 << 1)
3285 +#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 2)
3286 +#define IXGBE_FLAG_MSIX_CAPABLE (u32)(1 << 3)
3287 +#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 4)
3288 +#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 6)
3289 +#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 7)
3290 +#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 8)
3291 +#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 9)
3292 +#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 10)
3293 +#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 11)
3294 +#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 12)
3295 +#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 13)
3296 +#define IXGBE_FLAG_RSS_ENABLED (u32)(1 << 16)
3297 +#define IXGBE_FLAG_RSS_CAPABLE (u32)(1 << 17)
3298 +#define IXGBE_FLAG_VMDQ_CAPABLE (u32)(1 << 18)
3299 +#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 19)
3300 +#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 22)
3301 +#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1 << 23)
3302 +
3303 +/* default to trying for four seconds */
3304 +#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
3305
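
The reshuffled flag block above splits most features into a *_CAPABLE bit (what the hardware and platform support) and a *_ENABLED bit (what is currently switched on). A hedged sketch of the intended idiom, with invented names:

#include <stdio.h>

#define FLAG_MSIX_CAPABLE (1u << 3)
#define FLAG_MSIX_ENABLED (1u << 4)

static void try_enable_msix(unsigned int *flags)
{
        if (!(*flags & FLAG_MSIX_CAPABLE))
                return;               /* hardware cannot do it at all */
        /* pci_enable_msix() would be attempted here in the driver */
        *flags |= FLAG_MSIX_ENABLED;  /* set only on success */
}

int main(void)
{
        unsigned int flags = FLAG_MSIX_CAPABLE;
        try_enable_msix(&flags);
        printf("MSI-X enabled: %s\n", flags & FLAG_MSIX_ENABLED ? "yes" : "no");
        return 0;
}

Keeping the pairs distinct lets fallback paths clear only the ENABLED bit while remembering the capability for a later retry.
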
3306 /* OS defined structs */
3307 struct net_device *netdev;
3308 @@ -288,14 +288,23 @@ struct ixgbe_adapter {
3309 struct ixgbe_hw_stats stats;
3310
3311 /* Interrupt Throttle Rate */
3312 - u32 rx_eitr;
3313 - u32 tx_eitr;
3314 + u32 eitr_param;
3315
3316 unsigned long state;
3317 u64 tx_busy;
3318 +#ifndef IXGBE_NO_INET_LRO
3319 u64 lro_aggregated;
3320 u64 lro_flushed;
3321 u64 lro_no_desc;
3322 +#endif
3323 + unsigned int tx_ring_count;
3324 + unsigned int rx_ring_count;
3325 +
3326 + u32 link_speed;
3327 + bool link_up;
3328 + unsigned long link_check_timeout;
3329 +
3330 + struct work_struct watchdog_task;
3331 };
3332
3333 enum ixbge_state_t {
3334 @@ -317,11 +326,11 @@ extern int ixgbe_up(struct ixgbe_adapter
3335 extern void ixgbe_down(struct ixgbe_adapter *adapter);
3336 extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
3337 extern void ixgbe_reset(struct ixgbe_adapter *adapter);
3338 -extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
3339 extern void ixgbe_set_ethtool_ops(struct net_device *netdev);
3340 -extern int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
3341 - struct ixgbe_ring *rxdr);
3342 -extern int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
3343 - struct ixgbe_ring *txdr);
3344 +extern int ixgbe_setup_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
3345 +extern int ixgbe_setup_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
3346 +extern void ixgbe_free_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
3347 +extern void ixgbe_free_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
3348 +extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
3349
3350 #endif /* _IXGBE_H_ */
3351 --- a/drivers/net/ixgbe/ixgbe_main.c
3352 +++ b/drivers/net/ixgbe/ixgbe_main.c
3353 @@ -1,7 +1,7 @@
3354 /*******************************************************************************
3355
3356 Intel 10 Gigabit PCI Express Linux driver
3357 - Copyright(c) 1999 - 2007 Intel Corporation.
3358 + Copyright(c) 1999 - 2008 Intel Corporation.
3359
3360 This program is free software; you can redistribute it and/or modify it
3361 under the terms and conditions of the GNU General Public License,
3362 @@ -20,7 +20,6 @@
3363 the file called "COPYING".
3364
3365 Contact Information:
3366 - Linux NICS <linux.nics@intel.com>
3367 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
3368 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
3369
3370 @@ -46,15 +45,14 @@
3371
3372 char ixgbe_driver_name[] = "ixgbe";
3373 static const char ixgbe_driver_string[] =
3374 - "Intel(R) 10 Gigabit PCI Express Network Driver";
3375 + "Intel(R) 10 Gigabit PCI Express Network Driver";
3376
3377 -#define DRV_VERSION "1.3.18-k4"
3378 +#define DRV_VERSION "1.3.30-k2"
3379 const char ixgbe_driver_version[] = DRV_VERSION;
3380 -static const char ixgbe_copyright[] =
3381 - "Copyright (c) 1999-2007 Intel Corporation.";
3382 +static char ixgbe_copyright[] = "Copyright (c) 1999-2007 Intel Corporation.";
3383
3384 static const struct ixgbe_info *ixgbe_info_tbl[] = {
3385 - [board_82598] = &ixgbe_82598_info,
3386 + [board_82598] = &ixgbe_82598_info,
3387 };
3388
3389 /* ixgbe_pci_tbl - PCI Device ID Table
3390 @@ -74,15 +72,17 @@ static struct pci_device_id ixgbe_pci_tb
3391 board_82598 },
3392 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT),
3393 board_82598 },
3394 + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR),
3395 + board_82598 },
3396
3397 /* required last entry */
3398 {0, }
3399 };
3400 MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);
3401
3402 -#ifdef CONFIG_DCA
3403 +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
3404 static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
3405 - void *p);
3406 + void *p);
3407 static struct notifier_block dca_notifier = {
3408 .notifier_call = ixgbe_notify_dca,
3409 .next = NULL,
3410 @@ -104,7 +104,7 @@ static void ixgbe_release_hw_control(str
3411 /* Let firmware take over control of h/w */
3412 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
3413 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
3414 - ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
3415 + ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
3416 }
3417
3418 static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
3419 @@ -114,24 +114,11 @@ static void ixgbe_get_hw_control(struct
3420 /* Let firmware know the driver has taken over */
3421 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
3422 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
3423 - ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
3424 -}
3425 -
3426 -#ifdef DEBUG
3427 -/**
3428 - * ixgbe_get_hw_dev_name - return device name string
3429 - * used by hardware layer to print debugging information
3430 - **/
3431 -char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw)
3432 -{
3433 - struct ixgbe_adapter *adapter = hw->back;
3434 - struct net_device *netdev = adapter->netdev;
3435 - return netdev->name;
3436 + ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
3437 }
3438 -#endif
3439
3440 static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, u16 int_alloc_entry,
3441 - u8 msix_vector)
3442 + u8 msix_vector)
3443 {
3444 u32 ivar, index;
3445
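
The hunk above only re-indents the parameters; the body of ixgbe_set_ivar() is elided between hunks. For orientation, a self-contained model of how the 82598 packs interrupt-cause-to-vector mappings -- four 8-bit entries per 32-bit IVAR register, bit 7 as the valid flag -- quoted from memory of this driver generation, so treat it as illustrative rather than authoritative:

#include <stdio.h>
#include <stdint.h>

#define IVAR_ALLOC_VAL 0x80u   /* "entry valid" bit */

static void set_ivar(uint32_t ivar[], unsigned int entry, uint8_t vector)
{
        unsigned int index = (entry >> 2) & 0x1F;  /* which IVAR register */
        unsigned int shift = 8 * (entry & 0x3);    /* which byte inside it */

        ivar[index] &= ~(0xFFu << shift);
        ivar[index] |= (uint32_t)(vector | IVAR_ALLOC_VAL) << shift;
}

int main(void)
{
        uint32_t ivar[32] = { 0 };

        set_ivar(ivar, 5, 2);  /* cause entry 5 -> MSI-X vector 2 */
        printf("IVAR[1] = 0x%08x\n", ivar[1]);  /* 0x00008200 */
        return 0;
}
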
3446 @@ -144,13 +131,12 @@ static void ixgbe_set_ivar(struct ixgbe_
3447 }
3448
3449 static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
3450 - struct ixgbe_tx_buffer
3451 - *tx_buffer_info)
3452 + struct ixgbe_tx_buffer
3453 + *tx_buffer_info)
3454 {
3455 if (tx_buffer_info->dma) {
3456 - pci_unmap_page(adapter->pdev,
3457 - tx_buffer_info->dma,
3458 - tx_buffer_info->length, PCI_DMA_TODEVICE);
3459 + pci_unmap_page(adapter->pdev, tx_buffer_info->dma,
3460 + tx_buffer_info->length, PCI_DMA_TODEVICE);
3461 tx_buffer_info->dma = 0;
3462 }
3463 if (tx_buffer_info->skb) {
3464 @@ -161,107 +147,120 @@ static void ixgbe_unmap_and_free_tx_reso
3465 }
3466
3467 static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
3468 - struct ixgbe_ring *tx_ring,
3469 - unsigned int eop,
3470 - union ixgbe_adv_tx_desc *eop_desc)
3471 + struct ixgbe_ring *tx_ring,
3472 + unsigned int eop)
3473 {
3474 + struct ixgbe_hw *hw = &adapter->hw;
3475 + u32 head, tail;
3476 +
3477 /* Detect a transmit hang in hardware, this serializes the
3478 - * check with the clearing of time_stamp and movement of i */
3479 + * check with the clearing of time_stamp and movement of eop */
3480 + head = IXGBE_READ_REG(hw, tx_ring->head);
3481 + tail = IXGBE_READ_REG(hw, tx_ring->tail);
3482 adapter->detect_tx_hung = false;
3483 - if (tx_ring->tx_buffer_info[eop].dma &&
3484 + if ((head != tail) &&
3485 + tx_ring->tx_buffer_info[eop].time_stamp &&
3486 time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
3487 !(IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)) {
3488 /* detected Tx unit hang */
3489 + union ixgbe_adv_tx_desc *tx_desc;
3490 + tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
3491 DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
3492 - " TDH <%x>\n"
3493 - " TDT <%x>\n"
3494 + " Tx Queue <%d>\n"
3495 + " TDH, TDT <%x>, <%x>\n"
3496 " next_to_use <%x>\n"
3497 " next_to_clean <%x>\n"
3498 "tx_buffer_info[next_to_clean]\n"
3499 " time_stamp <%lx>\n"
3500 - " next_to_watch <%x>\n"
3501 - " jiffies <%lx>\n"
3502 - " next_to_watch.status <%x>\n",
3503 - readl(adapter->hw.hw_addr + tx_ring->head),
3504 - readl(adapter->hw.hw_addr + tx_ring->tail),
3505 - tx_ring->next_to_use,
3506 - tx_ring->next_to_clean,
3507 - tx_ring->tx_buffer_info[eop].time_stamp,
3508 - eop, jiffies, eop_desc->wb.status);
3509 + " jiffies <%lx>\n",
3510 + tx_ring->queue_index,
3511 + head, tail,
3512 + tx_ring->next_to_use, eop,
3513 + tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
3514 return true;
3515 }
3516
3517 return false;
3518 }
3519
3520 -#define IXGBE_MAX_TXD_PWR 14
3521 -#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
3522 +#define IXGBE_MAX_TXD_PWR 14
3523 +#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
3524
3525 /* Tx Descriptors needed, worst case */
3526 #define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
3527 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
3528 #define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
3529 - MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
3530 + MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
3531 +
3532 +#define GET_TX_HEAD_FROM_RING(ring) (\
3533 + *(volatile u32 *) \
3534 + ((union ixgbe_adv_tx_desc *)(ring)->desc + (ring)->count))
3535 +static void ixgbe_tx_timeout(struct net_device *netdev);
3536
3537 /**
3538 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
3539 * @adapter: board private structure
3540 + * @tx_ring: tx ring to clean
3541 **/
3542 static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
3543 - struct ixgbe_ring *tx_ring)
3544 + struct ixgbe_ring *tx_ring)
3545 {
3546 - struct net_device *netdev = adapter->netdev;
3547 - union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
3548 + union ixgbe_adv_tx_desc *tx_desc;
3549 struct ixgbe_tx_buffer *tx_buffer_info;
3550 - unsigned int i, eop;
3551 - bool cleaned = false;
3552 - unsigned int total_tx_bytes = 0, total_tx_packets = 0;
3553 -
3554 + struct net_device *netdev = adapter->netdev;
3555 + struct sk_buff *skb;
3556 + unsigned int i;
3557 + u32 head, oldhead;
3558 + unsigned int count = 0;
3559 + unsigned int total_bytes = 0, total_packets = 0;
3560 +
3561 + rmb();
3562 + head = GET_TX_HEAD_FROM_RING(tx_ring);
3563 + head = le32_to_cpu(head);
3564 i = tx_ring->next_to_clean;
3565 - eop = tx_ring->tx_buffer_info[i].next_to_watch;
3566 - eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
3567 - while (eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) {
3568 - cleaned = false;
3569 - while (!cleaned) {
3570 + while (1) {
3571 + while (i != head) {
3572 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
3573 tx_buffer_info = &tx_ring->tx_buffer_info[i];
3574 - cleaned = (i == eop);
3575 + skb = tx_buffer_info->skb;
3576
3577 - tx_ring->stats.bytes += tx_buffer_info->length;
3578 - if (cleaned) {
3579 - struct sk_buff *skb = tx_buffer_info->skb;
3580 + if (skb) {
3581 unsigned int segs, bytecount;
3582 +
3583 + /* gso_segs is currently only valid for tcp */
3584 segs = skb_shinfo(skb)->gso_segs ?: 1;
3585 /* multiply data chunks by size of headers */
3586 bytecount = ((segs - 1) * skb_headlen(skb)) +
3587 - skb->len;
3588 - total_tx_packets += segs;
3589 - total_tx_bytes += bytecount;
3590 + skb->len;
3591 + total_packets += segs;
3592 + total_bytes += bytecount;
3593 }
3594 +
3595 ixgbe_unmap_and_free_tx_resource(adapter,
3596 - tx_buffer_info);
3597 - tx_desc->wb.status = 0;
3598 + tx_buffer_info);
3599
3600 i++;
3601 if (i == tx_ring->count)
3602 i = 0;
3603 - }
3604
3605 - tx_ring->stats.packets++;
3606 -
3607 - eop = tx_ring->tx_buffer_info[i].next_to_watch;
3608 - eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
3609 -
3610 - /* weight of a sort for tx, avoid endless transmit cleanup */
3611 - if (total_tx_packets >= tx_ring->work_limit)
3612 - break;
3613 - }
3614 + count++;
3615 + if (count == tx_ring->count)
3616 + goto done_cleaning;
3617 + }
3618 + oldhead = head;
3619 + rmb();
3620 + head = GET_TX_HEAD_FROM_RING(tx_ring);
3621 + head = le32_to_cpu(head);
3622 + if (head == oldhead)
3623 + goto done_cleaning;
3624 + } /* while (1) */
3625
3626 +done_cleaning:
3627 tx_ring->next_to_clean = i;
3628
3629 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
3630 - if (total_tx_packets && netif_carrier_ok(netdev) &&
3631 - (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
3632 + if (unlikely(count && netif_carrier_ok(netdev) &&
3633 + (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
3634 /* Make sure that anybody stopping the queue after this
3635 * sees the new next_to_clean.
3636 */
3637 @@ -269,59 +268,68 @@ static bool ixgbe_clean_tx_irq(struct ix
3638 if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
3639 !test_bit(__IXGBE_DOWN, &adapter->state)) {
3640 netif_wake_subqueue(netdev, tx_ring->queue_index);
3641 - adapter->restart_queue++;
3642 + ++adapter->restart_queue;
3643 }
3644 }
3645
3646 - if (adapter->detect_tx_hung)
3647 - if (ixgbe_check_tx_hang(adapter, tx_ring, eop, eop_desc))
3648 - netif_stop_subqueue(netdev, tx_ring->queue_index);
3649 -
3650 - if (total_tx_packets >= tx_ring->work_limit)
3651 - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->eims_value);
3652 + if (adapter->detect_tx_hung) {
3653 + if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
3654 + /* schedule immediate reset if we believe we hung */
3655 + DPRINTK(PROBE, INFO,
3656 + "tx hang %d detected, resetting adapter\n",
3657 + adapter->tx_timeout_count + 1);
3658 + ixgbe_tx_timeout(adapter->netdev);
3659 + }
3660 + }
3661
3662 - tx_ring->total_bytes += total_tx_bytes;
3663 - tx_ring->total_packets += total_tx_packets;
3664 - adapter->net_stats.tx_bytes += total_tx_bytes;
3665 - adapter->net_stats.tx_packets += total_tx_packets;
3666 - cleaned = total_tx_packets ? true : false;
3667 - return cleaned;
3668 + /* re-arm the interrupt */
3669 + if ((total_packets >= tx_ring->work_limit) ||
3670 + (count == tx_ring->count))
3671 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->v_idx);
3672 +
3673 + tx_ring->total_bytes += total_bytes;
3674 + tx_ring->total_packets += total_packets;
3675 + tx_ring->stats.bytes += total_bytes;
3676 + tx_ring->stats.packets += total_packets;
3677 + adapter->net_stats.tx_bytes += total_bytes;
3678 + adapter->net_stats.tx_packets += total_packets;
3679 + return (total_packets ? true : false);
3680 }
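
The rewritten ixgbe_clean_tx_irq() above no longer walks descriptors looking for DD status bits; it trusts a head value the NIC writes back into host memory (the u32 just past the last descriptor, see GET_TX_HEAD_FROM_RING). A self-contained model of that scheme, with invented names:

#include <stdio.h>
#include <stdint.h>

#define RING_SIZE 8

struct tx_ring {
        uint64_t desc[RING_SIZE];   /* stand-in for the descriptor array */
        volatile uint32_t wb_head;  /* hardware-written head, placed after desc[] */
        unsigned int next_to_clean;
};

static unsigned int clean(struct tx_ring *r)
{
        unsigned int freed = 0, i = r->next_to_clean;
        uint32_t head = r->wb_head;  /* memory read, not an MMIO read of TDH */

        while (i != head) {
                /* unmap and free the buffer for slot i here */
                i = (i + 1) % RING_SIZE;
                freed++;
        }
        r->next_to_clean = i;
        return freed;
}

int main(void)
{
        struct tx_ring r = { .wb_head = 5, .next_to_clean = 2 };
        printf("reclaimed %u descriptors\n", clean(&r));  /* 3 */
        return 0;
}

The driver's version additionally re-reads the head once after draining (the oldhead/rmb() dance) to catch completions that raced with the loop.
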
3681
3682 -#ifdef CONFIG_DCA
3683 +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
3684 static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
3685 - struct ixgbe_ring *rxr)
3686 + struct ixgbe_ring *rx_ring)
3687 {
3688 u32 rxctrl;
3689 int cpu = get_cpu();
3690 - int q = rxr - adapter->rx_ring;
3691 + int q = rx_ring - adapter->rx_ring;
3692
3693 - if (rxr->cpu != cpu) {
3694 + if (rx_ring->cpu != cpu) {
3695 rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
3696 rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
3697 - rxctrl |= dca_get_tag(cpu);
3698 + rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
3699 rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
3700 rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
3701 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
3702 - rxr->cpu = cpu;
3703 + rx_ring->cpu = cpu;
3704 }
3705 put_cpu();
3706 }
3707
3708 static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
3709 - struct ixgbe_ring *txr)
3710 + struct ixgbe_ring *tx_ring)
3711 {
3712 u32 txctrl;
3713 int cpu = get_cpu();
3714 - int q = txr - adapter->tx_ring;
3715 + int q = tx_ring - adapter->tx_ring;
3716
3717 - if (txr->cpu != cpu) {
3718 + if (tx_ring->cpu != cpu) {
3719 txctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q));
3720 txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
3721 - txctrl |= dca_get_tag(cpu);
3722 + txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
3723 txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
3724 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q), txctrl);
3725 - txr->cpu = cpu;
3726 + tx_ring->cpu = cpu;
3727 }
3728 put_cpu();
3729 }
3730 @@ -351,11 +359,14 @@ static int __ixgbe_notify_dca(struct dev
3731
3732 switch (event) {
3733 case DCA_PROVIDER_ADD:
3734 - adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
3735 + /* if we're already enabled, don't do it again */
3736 + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
3737 + break;
3738 /* Always use CB2 mode, difference is masked
3739 * in the CB driver. */
3740 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
3741 if (dca_add_requester(dev) == 0) {
3742 + adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
3743 ixgbe_setup_dca(adapter);
3744 break;
3745 }
3746 @@ -372,7 +383,7 @@ static int __ixgbe_notify_dca(struct dev
3747 return 0;
3748 }
3749
3750 -#endif /* CONFIG_DCA */
3751 +#endif /* CONFIG_DCA or CONFIG_DCA_MODULE */
3752 /**
3753 * ixgbe_receive_skb - Send a completed packet up the stack
3754 * @adapter: board private structure
3755 @@ -382,13 +393,14 @@ static int __ixgbe_notify_dca(struct dev
3756 * @rx_desc: rx descriptor
3757 **/
3758 static void ixgbe_receive_skb(struct ixgbe_adapter *adapter,
3759 - struct sk_buff *skb, u8 status,
3760 - struct ixgbe_ring *ring,
3761 + struct sk_buff *skb, u8 status,
3762 + struct ixgbe_ring *ring,
3763 union ixgbe_adv_rx_desc *rx_desc)
3764 {
3765 bool is_vlan = (status & IXGBE_RXD_STAT_VP);
3766 u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
3767
3768 +#ifdef CONFIG_IXGBE_LRO
3769 if (adapter->netdev->features & NETIF_F_LRO &&
3770 skb->ip_summed == CHECKSUM_UNNECESSARY) {
3771 if (adapter->vlgrp && is_vlan)
3772 @@ -399,6 +411,7 @@ static void ixgbe_receive_skb(struct ixg
3773 lro_receive_skb(&ring->lro_mgr, skb, rx_desc);
3774 ring->lro_used = true;
3775 } else {
3776 +#endif
3777 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
3778 if (adapter->vlgrp && is_vlan)
3779 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, tag);
3780 @@ -410,7 +423,9 @@ static void ixgbe_receive_skb(struct ixg
3781 else
3782 netif_rx(skb);
3783 }
3784 +#ifdef CONFIG_IXGBE_LRO
3785 }
3786 +#endif
3787 }
3788
3789 /**
3790 @@ -420,14 +435,12 @@ static void ixgbe_receive_skb(struct ixg
3791 * @skb: skb currently being received and modified
3792 **/
3793 static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
3794 - u32 status_err,
3795 - struct sk_buff *skb)
3796 + u32 status_err, struct sk_buff *skb)
3797 {
3798 skb->ip_summed = CHECKSUM_NONE;
3799
3800 - /* Ignore Checksum bit is set, or rx csum disabled */
3801 - if ((status_err & IXGBE_RXD_STAT_IXSM) ||
3802 - !(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
3803 + /* Rx csum disabled */
3804 + if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
3805 return;
3806
3807 /* if IP and error */
3808 @@ -455,37 +468,44 @@ static inline void ixgbe_rx_checksum(str
3809 * @adapter: address of board private structure
3810 **/
3811 static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
3812 - struct ixgbe_ring *rx_ring,
3813 - int cleaned_count)
3814 + struct ixgbe_ring *rx_ring,
3815 + int cleaned_count)
3816 {
3817 - struct net_device *netdev = adapter->netdev;
3818 struct pci_dev *pdev = adapter->pdev;
3819 union ixgbe_adv_rx_desc *rx_desc;
3820 - struct ixgbe_rx_buffer *rx_buffer_info;
3821 - struct sk_buff *skb;
3822 + struct ixgbe_rx_buffer *bi;
3823 unsigned int i;
3824 - unsigned int bufsz = adapter->rx_buf_len + NET_IP_ALIGN;
3825 + unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;
3826
3827 i = rx_ring->next_to_use;
3828 - rx_buffer_info = &rx_ring->rx_buffer_info[i];
3829 + bi = &rx_ring->rx_buffer_info[i];
3830
3831 while (cleaned_count--) {
3832 rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
3833
3834 - if (!rx_buffer_info->page &&
3835 - (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
3836 - rx_buffer_info->page = alloc_page(GFP_ATOMIC);
3837 - if (!rx_buffer_info->page) {
3838 - adapter->alloc_rx_page_failed++;
3839 - goto no_buffers;
3840 + if (!bi->page_dma &&
3841 + (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
3842 + if (!bi->page) {
3843 + bi->page = alloc_page(GFP_ATOMIC);
3844 + if (!bi->page) {
3845 + adapter->alloc_rx_page_failed++;
3846 + goto no_buffers;
3847 + }
3848 + bi->page_offset = 0;
3849 + } else {
3850 + /* use a half page if we're re-using */
3851 + bi->page_offset ^= (PAGE_SIZE / 2);
3852 }
3853 - rx_buffer_info->page_dma =
3854 - pci_map_page(pdev, rx_buffer_info->page,
3855 - 0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
3856 +
3857 + bi->page_dma = pci_map_page(pdev, bi->page,
3858 + bi->page_offset,
3859 + (PAGE_SIZE / 2),
3860 + PCI_DMA_FROMDEVICE);
3861 }
3862
3863 - if (!rx_buffer_info->skb) {
3864 - skb = netdev_alloc_skb(netdev, bufsz);
3865 + if (!bi->skb) {
3866 + struct sk_buff *skb = netdev_alloc_skb(adapter->netdev,
3867 + bufsz);
3868
3869 if (!skb) {
3870 adapter->alloc_rx_buff_failed++;
3871 @@ -499,28 +519,25 @@ static void ixgbe_alloc_rx_buffers(struc
3872 */
3873 skb_reserve(skb, NET_IP_ALIGN);
3874
3875 - rx_buffer_info->skb = skb;
3876 - rx_buffer_info->dma = pci_map_single(pdev, skb->data,
3877 - bufsz,
3878 - PCI_DMA_FROMDEVICE);
3879 + bi->skb = skb;
3880 + bi->dma = pci_map_single(pdev, skb->data, bufsz,
3881 + PCI_DMA_FROMDEVICE);
3882 }
3883 /* Refresh the desc even if buffer_addrs didn't change because
3884 * each write-back erases this info. */
3885 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
3886 - rx_desc->read.pkt_addr =
3887 - cpu_to_le64(rx_buffer_info->page_dma);
3888 - rx_desc->read.hdr_addr =
3889 - cpu_to_le64(rx_buffer_info->dma);
3890 + rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
3891 + rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
3892 } else {
3893 - rx_desc->read.pkt_addr =
3894 - cpu_to_le64(rx_buffer_info->dma);
3895 + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
3896 }
3897
3898 i++;
3899 if (i == rx_ring->count)
3900 i = 0;
3901 - rx_buffer_info = &rx_ring->rx_buffer_info[i];
3902 + bi = &rx_ring->rx_buffer_info[i];
3903 }
3904 +
3905 no_buffers:
3906 if (rx_ring->next_to_use != i) {
3907 rx_ring->next_to_use = i;
3908 @@ -538,46 +555,54 @@ no_buffers:
3909 }
3910 }
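
ixgbe_alloc_rx_buffers() now hands the hardware half pages instead of whole ones: a fresh page starts at offset 0, and on reuse page_offset is XOR-flipped to the other half, so one page services two receive slots. A compact model of just that offset logic (PAGE_SIZE hard-coded for the demo):

#include <stdio.h>

#define PAGE_SIZE 4096

struct rx_buffer {
        char *page;
        unsigned int page_offset;
};

static void refill(struct rx_buffer *bi, char *fresh_page)
{
        if (!bi->page) {
                bi->page = fresh_page;             /* first use of a new page */
                bi->page_offset = 0;
        } else {
                bi->page_offset ^= PAGE_SIZE / 2;  /* reuse: flip to other half */
        }
        /* the driver would now DMA-map page + page_offset for PAGE_SIZE/2 */
}

int main(void)
{
        static char page[PAGE_SIZE];
        struct rx_buffer bi = { 0 };

        refill(&bi, page);
        printf("offset %u\n", bi.page_offset);  /* 0    */
        refill(&bi, NULL);
        printf("offset %u\n", bi.page_offset);  /* 2048 */
        return 0;
}
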
3911
3912 +static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
3913 +{
3914 + return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
3915 +}
3916 +
3917 +static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
3918 +{
3919 + return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
3920 +}
3921 +
3922 static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
3923 - struct ixgbe_ring *rx_ring,
3924 - int *work_done, int work_to_do)
3925 + struct ixgbe_ring *rx_ring,
3926 + int *work_done, int work_to_do)
3927 {
3928 - struct net_device *netdev = adapter->netdev;
3929 struct pci_dev *pdev = adapter->pdev;
3930 union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
3931 struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
3932 struct sk_buff *skb;
3933 unsigned int i;
3934 - u32 upper_len, len, staterr;
3935 + u32 len, staterr;
3936 u16 hdr_info;
3937 bool cleaned = false;
3938 int cleaned_count = 0;
3939 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
3940
3941 i = rx_ring->next_to_clean;
3942 - upper_len = 0;
3943 rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
3944 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
3945 rx_buffer_info = &rx_ring->rx_buffer_info[i];
3946
3947 while (staterr & IXGBE_RXD_STAT_DD) {
3948 + u32 upper_len = 0;
3949 if (*work_done >= work_to_do)
3950 break;
3951 (*work_done)++;
3952
3953 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
3954 - hdr_info =
3955 - le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info);
3956 - len =
3957 - ((hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
3958 - IXGBE_RXDADV_HDRBUFLEN_SHIFT);
3959 + hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
3960 + len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
3961 + IXGBE_RXDADV_HDRBUFLEN_SHIFT;
3962 if (hdr_info & IXGBE_RXDADV_SPH)
3963 adapter->rx_hdr_split++;
3964 if (len > IXGBE_RX_HDR_SIZE)
3965 len = IXGBE_RX_HDR_SIZE;
3966 upper_len = le16_to_cpu(rx_desc->wb.upper.length);
3967 - } else
3968 + } else {
3969 len = le16_to_cpu(rx_desc->wb.upper.length);
3970 + }
3971
3972 cleaned = true;
3973 skb = rx_buffer_info->skb;
3974 @@ -586,18 +611,25 @@ static bool ixgbe_clean_rx_irq(struct ix
3975
3976 if (len && !skb_shinfo(skb)->nr_frags) {
3977 pci_unmap_single(pdev, rx_buffer_info->dma,
3978 - adapter->rx_buf_len + NET_IP_ALIGN,
3979 - PCI_DMA_FROMDEVICE);
3980 + rx_ring->rx_buf_len + NET_IP_ALIGN,
3981 + PCI_DMA_FROMDEVICE);
3982 skb_put(skb, len);
3983 }
3984
3985 if (upper_len) {
3986 pci_unmap_page(pdev, rx_buffer_info->page_dma,
3987 - PAGE_SIZE, PCI_DMA_FROMDEVICE);
3988 + PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
3989 rx_buffer_info->page_dma = 0;
3990 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
3991 - rx_buffer_info->page, 0, upper_len);
3992 - rx_buffer_info->page = NULL;
3993 + rx_buffer_info->page,
3994 + rx_buffer_info->page_offset,
3995 + upper_len);
3996 +
3997 + if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
3998 + (page_count(rx_buffer_info->page) != 1))
3999 + rx_buffer_info->page = NULL;
4000 + else
4001 + get_page(rx_buffer_info->page);
4002
4003 skb->len += upper_len;
4004 skb->data_len += upper_len;
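
The receive path only recycles a page when its other half can still be handed out: the buffer length must fit in half a page and the driver must hold the sole reference (page_count == 1 means the stack has already released its half). The predicate from the hunk above, restated:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096

static bool can_recycle(unsigned int rx_buf_len, int page_refcount)
{
        return rx_buf_len <= PAGE_SIZE / 2 && page_refcount == 1;
}

int main(void)
{
        printf("%d\n", can_recycle(2048, 1));  /* 1: keep page, get_page() it */
        printf("%d\n", can_recycle(2048, 2));  /* 0: stack still owns a half  */
        printf("%d\n", can_recycle(3072, 1));  /* 0: buffer spans both halves */
        return 0;
}
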
4005 @@ -620,6 +652,7 @@ static bool ixgbe_clean_rx_irq(struct ix
4006 rx_buffer_info->skb = next_buffer->skb;
4007 rx_buffer_info->dma = next_buffer->dma;
4008 next_buffer->skb = skb;
4009 + next_buffer->dma = 0;
4010 adapter->non_eop_descs++;
4011 goto next_desc;
4012 }
4013 @@ -635,9 +668,9 @@ static bool ixgbe_clean_rx_irq(struct ix
4014 total_rx_bytes += skb->len;
4015 total_rx_packets++;
4016
4017 - skb->protocol = eth_type_trans(skb, netdev);
4018 + skb->protocol = eth_type_trans(skb, adapter->netdev);
4019 ixgbe_receive_skb(adapter, skb, staterr, rx_ring, rx_desc);
4020 - netdev->last_rx = jiffies;
4021 + adapter->netdev->last_rx = jiffies;
4022
4023 next_desc:
4024 rx_desc->wb.upper.status_error = 0;
4025 @@ -655,10 +688,12 @@ next_desc:
4026 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
4027 }
4028
4029 +#ifdef CONFIG_IXGBE_LRO
4030 if (rx_ring->lro_used) {
4031 lro_flush_all(&rx_ring->lro_mgr);
4032 rx_ring->lro_used = false;
4033 }
4034 +#endif
4035
4036 rx_ring->next_to_clean = i;
4037 cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
4038 @@ -666,9 +701,6 @@ next_desc:
4039 if (cleaned_count)
4040 ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
4041
4042 - adapter->net_stats.rx_bytes += total_rx_bytes;
4043 - adapter->net_stats.rx_packets += total_rx_packets;
4044 -
4045 rx_ring->total_packets += total_rx_packets;
4046 rx_ring->total_bytes += total_rx_bytes;
4047 adapter->net_stats.rx_bytes += total_rx_bytes;
4048 @@ -700,43 +732,43 @@ static void ixgbe_configure_msix(struct
4049 q_vector = &adapter->q_vector[v_idx];
4050 /* XXX for_each_bit(...) */
4051 r_idx = find_first_bit(q_vector->rxr_idx,
4052 - adapter->num_rx_queues);
4053 + adapter->num_rx_queues);
4054
4055 for (i = 0; i < q_vector->rxr_count; i++) {
4056 j = adapter->rx_ring[r_idx].reg_idx;
4057 ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(j), v_idx);
4058 r_idx = find_next_bit(q_vector->rxr_idx,
4059 - adapter->num_rx_queues,
4060 - r_idx + 1);
4061 + adapter->num_rx_queues,
4062 + r_idx + 1);
4063 }
4064 r_idx = find_first_bit(q_vector->txr_idx,
4065 - adapter->num_tx_queues);
4066 + adapter->num_tx_queues);
4067
4068 for (i = 0; i < q_vector->txr_count; i++) {
4069 j = adapter->tx_ring[r_idx].reg_idx;
4070 ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(j), v_idx);
4071 r_idx = find_next_bit(q_vector->txr_idx,
4072 - adapter->num_tx_queues,
4073 - r_idx + 1);
4074 + adapter->num_tx_queues,
4075 + r_idx + 1);
4076 }
4077
4078 - /* if this is a tx only vector use half the irq (tx) rate */
4079 + /* if this is a tx only vector halve the interrupt rate */
4080 if (q_vector->txr_count && !q_vector->rxr_count)
4081 - q_vector->eitr = adapter->tx_eitr;
4082 + q_vector->eitr = (adapter->eitr_param >> 1);
4083 else
4084 - /* rx only or mixed */
4085 - q_vector->eitr = adapter->rx_eitr;
4086 + /* rx only */
4087 + q_vector->eitr = adapter->eitr_param;
4088
4089 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx),
4090 - EITR_INTS_PER_SEC_TO_REG(q_vector->eitr));
4091 + EITR_INTS_PER_SEC_TO_REG(q_vector->eitr));
4092 }
4093
4094 ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX, v_idx);
4095 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
4096
4097 - /* set up to autoclear timer, lsc, and the vectors */
4098 + /* set up to autoclear timer, and the vectors */
4099 mask = IXGBE_EIMS_ENABLE_MASK;
4100 - mask &= ~IXGBE_EIMS_OTHER;
4101 + mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
4102 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
4103 }
4104
4105 @@ -766,8 +798,8 @@ enum latency_range {
4106 * parameter (see ixgbe_param.c)
4107 **/
4108 static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
4109 - u32 eitr, u8 itr_setting,
4110 - int packets, int bytes)
4111 + u32 eitr, u8 itr_setting,
4112 + int packets, int bytes)
4113 {
4114 unsigned int retval = itr_setting;
4115 u32 timepassed_us;
4116 @@ -814,40 +846,40 @@ static void ixgbe_set_itr_msix(struct ix
4117 u32 new_itr;
4118 u8 current_itr, ret_itr;
4119 int i, r_idx, v_idx = ((void *)q_vector - (void *)(adapter->q_vector)) /
4120 - sizeof(struct ixgbe_q_vector);
4121 + sizeof(struct ixgbe_q_vector);
4122 struct ixgbe_ring *rx_ring, *tx_ring;
4123
4124 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
4125 for (i = 0; i < q_vector->txr_count; i++) {
4126 tx_ring = &(adapter->tx_ring[r_idx]);
4127 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
4128 - q_vector->tx_eitr,
4129 - tx_ring->total_packets,
4130 - tx_ring->total_bytes);
4131 + q_vector->tx_itr,
4132 + tx_ring->total_packets,
4133 + tx_ring->total_bytes);
4134 /* if the result for this queue would decrease interrupt
4135 * rate for this vector then use that result */
4136 - q_vector->tx_eitr = ((q_vector->tx_eitr > ret_itr) ?
4137 - q_vector->tx_eitr - 1 : ret_itr);
4138 + q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
4139 + q_vector->tx_itr - 1 : ret_itr);
4140 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
4141 - r_idx + 1);
4142 + r_idx + 1);
4143 }
4144
4145 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
4146 for (i = 0; i < q_vector->rxr_count; i++) {
4147 rx_ring = &(adapter->rx_ring[r_idx]);
4148 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
4149 - q_vector->rx_eitr,
4150 - rx_ring->total_packets,
4151 - rx_ring->total_bytes);
4152 + q_vector->rx_itr,
4153 + rx_ring->total_packets,
4154 + rx_ring->total_bytes);
4155 /* if the result for this queue would decrease interrupt
4156 * rate for this vector then use that result */
4157 - q_vector->rx_eitr = ((q_vector->rx_eitr > ret_itr) ?
4158 - q_vector->rx_eitr - 1 : ret_itr);
4159 + q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
4160 + q_vector->rx_itr - 1 : ret_itr);
4161 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
4162 - r_idx + 1);
4163 + r_idx + 1);
4164 }
4165
4166 - current_itr = max(q_vector->rx_eitr, q_vector->tx_eitr);
4167 + current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
4168
4169 switch (current_itr) {
4170 /* counts and packets in update_itr are dependent on these numbers */
4171 @@ -871,13 +903,27 @@ static void ixgbe_set_itr_msix(struct ix
4172 itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
4173 /* must write high and low 16 bits to reset counter */
4174 DPRINTK(TX_ERR, DEBUG, "writing eitr(%d): %08X\n", v_idx,
4175 - itr_reg);
4176 + itr_reg);
4177 IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg | (itr_reg)<<16);
4178 }
4179
4180 return;
4181 }
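
ixgbe_set_itr_msix() blends the per-ring latency classes: tx and rx each vote, max() picks the busier class, and the switch (its cases elided by this hunk) maps the class to a new interrupt rate. A model of that final step, using the 100k/20k/8k table this driver generation is believed to use -- verify against the full function before relying on the numbers:

#include <stdio.h>

enum latency_range { lowest_latency, low_latency, bulk_latency };

/* assumed mapping; the switch body is not visible in this hunk */
static unsigned int class_to_rate(enum latency_range r)
{
        switch (r) {
        case lowest_latency: return 100000;  /* small latency-bound flows */
        case low_latency:    return 20000;
        default:             return 8000;    /* bulk transfers */
        }
}

int main(void)
{
        enum latency_range rx = bulk_latency, tx = low_latency;
        enum latency_range cur = rx > tx ? rx : tx;

        printf("new_itr = %u ints/s\n", class_to_rate(cur));  /* 8000 */
        return 0;
}
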
4182
4183 +
4184 +static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
4185 +{
4186 + struct ixgbe_hw *hw = &adapter->hw;
4187 +
4188 + adapter->lsc_int++;
4189 + adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
4190 + adapter->link_check_timeout = jiffies;
4191 + if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
4192 + IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
4193 + schedule_work(&adapter->watchdog_task);
4194 + }
4195 +}
4196 +
4197 static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
4198 {
4199 struct net_device *netdev = data;
4200 @@ -885,11 +931,8 @@ static irqreturn_t ixgbe_msix_lsc(int ir
4201 struct ixgbe_hw *hw = &adapter->hw;
4202 u32 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4203
4204 - if (eicr & IXGBE_EICR_LSC) {
4205 - adapter->lsc_int++;
4206 - if (!test_bit(__IXGBE_DOWN, &adapter->state))
4207 - mod_timer(&adapter->watchdog_timer, jiffies);
4208 - }
4209 + if (eicr & IXGBE_EICR_LSC)
4210 + ixgbe_check_lsc(adapter);
4211
4212 if (!test_bit(__IXGBE_DOWN, &adapter->state))
4213 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
4214 @@ -901,7 +944,7 @@ static irqreturn_t ixgbe_msix_clean_tx(i
4215 {
4216 struct ixgbe_q_vector *q_vector = data;
4217 struct ixgbe_adapter *adapter = q_vector->adapter;
4218 - struct ixgbe_ring *txr;
4219 + struct ixgbe_ring *tx_ring;
4220 int i, r_idx;
4221
4222 if (!q_vector->txr_count)
4223 @@ -909,16 +952,16 @@ static irqreturn_t ixgbe_msix_clean_tx(i
4224
4225 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
4226 for (i = 0; i < q_vector->txr_count; i++) {
4227 - txr = &(adapter->tx_ring[r_idx]);
4228 -#ifdef CONFIG_DCA
4229 + tx_ring = &(adapter->tx_ring[r_idx]);
4230 +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
4231 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
4232 - ixgbe_update_tx_dca(adapter, txr);
4233 + ixgbe_update_tx_dca(adapter, tx_ring);
4234 #endif
4235 - txr->total_bytes = 0;
4236 - txr->total_packets = 0;
4237 - ixgbe_clean_tx_irq(adapter, txr);
4238 + tx_ring->total_bytes = 0;
4239 + tx_ring->total_packets = 0;
4240 + ixgbe_clean_tx_irq(adapter, tx_ring);
4241 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
4242 - r_idx + 1);
4243 + r_idx + 1);
4244 }
4245
4246 return IRQ_HANDLED;
4247 @@ -933,18 +976,26 @@ static irqreturn_t ixgbe_msix_clean_rx(i
4248 {
4249 struct ixgbe_q_vector *q_vector = data;
4250 struct ixgbe_adapter *adapter = q_vector->adapter;
4251 - struct ixgbe_ring *rxr;
4252 + struct ixgbe_ring *rx_ring;
4253 int r_idx;
4254 + int i;
4255
4256 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
4257 + for (i = 0; i < q_vector->rxr_count; i++) {
4258 + rx_ring = &(adapter->rx_ring[r_idx]);
4259 + rx_ring->total_bytes = 0;
4260 + rx_ring->total_packets = 0;
4261 + r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
4262 + r_idx + 1);
4263 + }
4264 +
4265 if (!q_vector->rxr_count)
4266 return IRQ_HANDLED;
4267
4268 - rxr = &(adapter->rx_ring[r_idx]);
4269 + r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
4270 + rx_ring = &(adapter->rx_ring[r_idx]);
4271 /* disable interrupts on this vector only */
4272 - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rxr->v_idx);
4273 - rxr->total_bytes = 0;
4274 - rxr->total_packets = 0;
4275 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rx_ring->v_idx);
4276 netif_rx_schedule(adapter->netdev, &q_vector->napi);
4277
4278 return IRQ_HANDLED;
4279 @@ -963,39 +1014,90 @@ static irqreturn_t ixgbe_msix_clean_many
4280 * @napi: napi struct with our devices info in it
4281 * @budget: amount of work driver is allowed to do this pass, in packets
4282 *
4283 + * This function is optimized for cleaning one queue only on a single
4284 + * q_vector!!!
4285 **/
4286 static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
4287 {
4288 struct ixgbe_q_vector *q_vector =
4289 - container_of(napi, struct ixgbe_q_vector, napi);
4290 + container_of(napi, struct ixgbe_q_vector, napi);
4291 struct ixgbe_adapter *adapter = q_vector->adapter;
4292 - struct ixgbe_ring *rxr;
4293 + struct ixgbe_ring *rx_ring = NULL;
4294 int work_done = 0;
4295 long r_idx;
4296
4297 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
4298 - rxr = &(adapter->rx_ring[r_idx]);
4299 -#ifdef CONFIG_DCA
4300 + rx_ring = &(adapter->rx_ring[r_idx]);
4301 +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
4302 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
4303 - ixgbe_update_rx_dca(adapter, rxr);
4304 + ixgbe_update_rx_dca(adapter, rx_ring);
4305 #endif
4306
4307 - ixgbe_clean_rx_irq(adapter, rxr, &work_done, budget);
4308 + ixgbe_clean_rx_irq(adapter, rx_ring, &work_done, budget);
4309
4310 /* If all Rx work done, exit the polling mode */
4311 if (work_done < budget) {
4312 netif_rx_complete(adapter->netdev, napi);
4313 - if (adapter->rx_eitr < IXGBE_MIN_ITR_USECS)
4314 + if (adapter->itr_setting & 3)
4315 ixgbe_set_itr_msix(q_vector);
4316 if (!test_bit(__IXGBE_DOWN, &adapter->state))
4317 - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rxr->v_idx);
4318 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rx_ring->v_idx);
4319 }
4320
4321 return work_done;
4322 }
4323
4324 +/**
4325 + * ixgbe_clean_rxonly_many - msix (aka one shot) rx clean routine
4326 + * @napi: napi struct with our devices info in it
4327 + * @budget: amount of work driver is allowed to do this pass, in packets
4328 + *
4329 + * This function will clean more than one rx queue associated with a
4330 + * q_vector.
4331 + **/
4332 +static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget)
4333 +{
4334 + struct ixgbe_q_vector *q_vector =
4335 + container_of(napi, struct ixgbe_q_vector, napi);
4336 + struct ixgbe_adapter *adapter = q_vector->adapter;
4337 + struct ixgbe_ring *rx_ring = NULL;
4338 + int work_done = 0, i;
4339 + long r_idx;
4340 + u16 enable_mask = 0;
4341 +
4342 + /* attempt to distribute budget to each queue fairly, but don't allow
4343 + * the budget to go below 1 because we'll exit polling */
4344 + budget /= (q_vector->rxr_count ?: 1);
4345 + budget = max(budget, 1);
4346 + r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
4347 + for (i = 0; i < q_vector->rxr_count; i++) {
4348 + rx_ring = &(adapter->rx_ring[r_idx]);
4349 +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
4350 + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
4351 + ixgbe_update_rx_dca(adapter, rx_ring);
4352 +#endif
4353 + ixgbe_clean_rx_irq(adapter, rx_ring, &work_done, budget);
4354 + enable_mask |= rx_ring->v_idx;
4355 + r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
4356 + r_idx + 1);
4357 + }
4358 +
4359 + r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
4360 + rx_ring = &(adapter->rx_ring[r_idx]);
4361 + /* If all Rx work done, exit the polling mode */
4362 + if (work_done < budget) {
4363 + netif_rx_complete(adapter->netdev, napi);
4364 + if (adapter->itr_setting & 3)
4365 + ixgbe_set_itr_msix(q_vector);
4366 + if (!test_bit(__IXGBE_DOWN, &adapter->state))
4367 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, enable_mask);
4368 + return 0;
4369 + }
4370 +
4371 + return work_done;
4372 +}
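
Worked example of the budget split at the top of ixgbe_clean_rxonly_many(): the NAPI budget is divided across the vector's rings and clamped at 1 so each ring can still make progress.

#include <stdio.h>

static int per_ring_budget(int budget, int rxr_count)
{
        budget /= (rxr_count ? rxr_count : 1);  /* the GNU ?: in the patch */
        return budget > 1 ? budget : 1;         /* max(budget, 1) */
}

int main(void)
{
        printf("%d\n", per_ring_budget(64, 3));  /* 21 per ring */
        printf("%d\n", per_ring_budget(2, 4));   /* 1, never 0  */
        return 0;
}
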
4373 static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
4374 - int r_idx)
4375 + int r_idx)
4376 {
4377 a->q_vector[v_idx].adapter = a;
4378 set_bit(r_idx, a->q_vector[v_idx].rxr_idx);
4379 @@ -1004,7 +1106,7 @@ static inline void map_vector_to_rxq(str
4380 }
4381
4382 static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
4383 - int r_idx)
4384 + int r_idx)
4385 {
4386 a->q_vector[v_idx].adapter = a;
4387 set_bit(r_idx, a->q_vector[v_idx].txr_idx);
4388 @@ -1024,7 +1126,7 @@ static inline void map_vector_to_txq(str
4389 * mapping configurations in here.
4390 **/
4391 static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
4392 - int vectors)
4393 + int vectors)
4394 {
4395 int v_start = 0;
4396 int rxr_idx = 0, txr_idx = 0;
4397 @@ -1101,28 +1203,28 @@ static int ixgbe_request_msix_irqs(struc
4398 goto out;
4399
4400 #define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
4401 - (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
4402 - &ixgbe_msix_clean_many)
4403 + (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
4404 + &ixgbe_msix_clean_many)
4405 for (vector = 0; vector < q_vectors; vector++) {
4406 handler = SET_HANDLER(&adapter->q_vector[vector]);
4407 sprintf(adapter->name[vector], "%s:v%d-%s",
4408 - netdev->name, vector,
4409 - (handler == &ixgbe_msix_clean_rx) ? "Rx" :
4410 - ((handler == &ixgbe_msix_clean_tx) ? "Tx" : "TxRx"));
4411 + netdev->name, vector,
4412 + (handler == &ixgbe_msix_clean_rx) ? "Rx" :
4413 + ((handler == &ixgbe_msix_clean_tx) ? "Tx" : "TxRx"));
4414 err = request_irq(adapter->msix_entries[vector].vector,
4415 - handler, 0, adapter->name[vector],
4416 - &(adapter->q_vector[vector]));
4417 + handler, 0, adapter->name[vector],
4418 + &(adapter->q_vector[vector]));
4419 if (err) {
4420 DPRINTK(PROBE, ERR,
4421 - "request_irq failed for MSIX interrupt "
4422 - "Error: %d\n", err);
4423 + "request_irq failed for MSIX interrupt "
4424 + "Error: %d\n", err);
4425 goto free_queue_irqs;
4426 }
4427 }
4428
4429 sprintf(adapter->name[vector], "%s:lsc", netdev->name);
4430 err = request_irq(adapter->msix_entries[vector].vector,
4431 - &ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
4432 + &ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
4433 if (err) {
4434 DPRINTK(PROBE, ERR,
4435 "request_irq for msix_lsc failed: %d\n", err);
4436 @@ -1134,7 +1236,7 @@ static int ixgbe_request_msix_irqs(struc
4437 free_queue_irqs:
4438 for (i = vector - 1; i >= 0; i--)
4439 free_irq(adapter->msix_entries[--vector].vector,
4440 - &(adapter->q_vector[i]));
4441 + &(adapter->q_vector[i]));
4442 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
4443 pci_disable_msix(adapter->pdev);
4444 kfree(adapter->msix_entries);
4445 @@ -1152,16 +1254,16 @@ static void ixgbe_set_itr(struct ixgbe_a
4446 struct ixgbe_ring *rx_ring = &adapter->rx_ring[0];
4447 struct ixgbe_ring *tx_ring = &adapter->tx_ring[0];
4448
4449 - q_vector->tx_eitr = ixgbe_update_itr(adapter, new_itr,
4450 - q_vector->tx_eitr,
4451 - tx_ring->total_packets,
4452 - tx_ring->total_bytes);
4453 - q_vector->rx_eitr = ixgbe_update_itr(adapter, new_itr,
4454 - q_vector->rx_eitr,
4455 - rx_ring->total_packets,
4456 - rx_ring->total_bytes);
4457 + q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
4458 + q_vector->tx_itr,
4459 + tx_ring->total_packets,
4460 + tx_ring->total_bytes);
4461 + q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr,
4462 + q_vector->rx_itr,
4463 + rx_ring->total_packets,
4464 + rx_ring->total_bytes);
4465
4466 - current_itr = max(q_vector->rx_eitr, q_vector->tx_eitr);
4467 + current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
4468
4469 switch (current_itr) {
4470 /* counts and packets in update_itr are dependent on these numbers */
4471 @@ -1206,19 +1308,19 @@ static irqreturn_t ixgbe_intr(int irq, v
4472 struct ixgbe_hw *hw = &adapter->hw;
4473 u32 eicr;
4474
4475 -
4476 /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read
4477 * therefore no explict interrupt disable is necessary */
4478 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4479 - if (!eicr)
4480 + if (!eicr) {
4481 + /* shared interrupt alert!
4482 + * make sure interrupts are enabled because the read will
4483 + * have disabled interrupts due to EIAM */
4484 + ixgbe_irq_enable(adapter);
4485 return IRQ_NONE; /* Not our interrupt */
4486 -
4487 - if (eicr & IXGBE_EICR_LSC) {
4488 - adapter->lsc_int++;
4489 - if (!test_bit(__IXGBE_DOWN, &adapter->state))
4490 - mod_timer(&adapter->watchdog_timer, jiffies);
4491 }
4492
4493 + if (eicr & IXGBE_EICR_LSC)
4494 + ixgbe_check_lsc(adapter);
4495
4496 if (netif_rx_schedule_prep(netdev, &adapter->q_vector[0].napi)) {
4497 adapter->tx_ring[0].total_packets = 0;
4498 @@ -1261,10 +1363,10 @@ static int ixgbe_request_irq(struct ixgb
4499 err = ixgbe_request_msix_irqs(adapter);
4500 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
4501 err = request_irq(adapter->pdev->irq, &ixgbe_intr, 0,
4502 - netdev->name, netdev);
4503 + netdev->name, netdev);
4504 } else {
4505 err = request_irq(adapter->pdev->irq, &ixgbe_intr, IRQF_SHARED,
4506 - netdev->name, netdev);
4507 + netdev->name, netdev);
4508 }
4509
4510 if (err)
4511 @@ -1288,7 +1390,7 @@ static void ixgbe_free_irq(struct ixgbe_
4512 i--;
4513 for (; i >= 0; i--) {
4514 free_irq(adapter->msix_entries[i].vector,
4515 - &(adapter->q_vector[i]));
4516 + &(adapter->q_vector[i]));
4517 }
4518
4519 ixgbe_reset_q_vectors(adapter);
4520 @@ -1335,7 +1437,7 @@ static void ixgbe_configure_msi_and_lega
4521 struct ixgbe_hw *hw = &adapter->hw;
4522
4523 IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
4524 - EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr));
4525 + EITR_INTS_PER_SEC_TO_REG(adapter->eitr_param));
4526
4527 ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(0), 0);
4528 ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(0), 0);
4529 @@ -1347,26 +1449,31 @@ static void ixgbe_configure_msi_and_lega
4530 }
4531
4532 /**
4533 - * ixgbe_configure_tx - Configure 8254x Transmit Unit after Reset
4534 + * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
4535 * @adapter: board private structure
4536 *
4537 * Configure the Tx unit of the MAC after a reset.
4538 **/
4539 static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
4540 {
4541 - u64 tdba;
4542 + u64 tdba, tdwba;
4543 struct ixgbe_hw *hw = &adapter->hw;
4544 u32 i, j, tdlen, txctrl;
4545
4546 /* Setup the HW Tx Head and Tail descriptor pointers */
4547 for (i = 0; i < adapter->num_tx_queues; i++) {
4548 - j = adapter->tx_ring[i].reg_idx;
4549 - tdba = adapter->tx_ring[i].dma;
4550 - tdlen = adapter->tx_ring[i].count *
4551 - sizeof(union ixgbe_adv_tx_desc);
4552 + struct ixgbe_ring *ring = &adapter->tx_ring[i];
4553 + j = ring->reg_idx;
4554 + tdba = ring->dma;
4555 + tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
4556 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
4557 - (tdba & DMA_32BIT_MASK));
4558 + (tdba & DMA_32BIT_MASK));
4559 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
4560 + tdwba = ring->dma +
4561 + (ring->count * sizeof(union ixgbe_adv_tx_desc));
4562 + tdwba |= IXGBE_TDWBAL_HEAD_WB_ENABLE;
4563 + IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(j), tdwba & DMA_32BIT_MASK);
4564 + IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(j), (tdwba >> 32));
4565 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen);
4566 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
4567 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
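
The TDWBA programming above tells the 82598 where to DMA its head pointer: the slot directly after the last descriptor, so base + count * sizeof(descriptor), with the low bit enabling the feature (assuming the usual 16-byte advanced descriptor and IXGBE_TDWBAL_HEAD_WB_ENABLE == 0x1). A quick check of how the address splits across the two registers:

#include <stdio.h>
#include <stdint.h>

#define DESC_SIZE 16u        /* sizeof(union ixgbe_adv_tx_desc), assumed */
#define HEAD_WB_ENABLE 0x1u

int main(void)
{
        uint64_t ring_dma = 0x1f000000;
        uint32_t count = 512;
        uint64_t tdwba = (ring_dma + (uint64_t)count * DESC_SIZE) | HEAD_WB_ENABLE;

        printf("TDWBAL = 0x%08x, TDWBAH = 0x%08x\n",
               (uint32_t)(tdwba & 0xffffffffu), (uint32_t)(tdwba >> 32));
        return 0;
}
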
4568 @@ -1375,20 +1482,59 @@ static void ixgbe_configure_tx(struct ix
4569 /* Disable Tx Head Writeback RO bit, since this hoses
4570 * bookkeeping if things aren't delivered in order.
4571 */
4572 - txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
4573 + txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
4574 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
4575 - IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
4576 + IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
4577 }
4578 }
4579
4580 -#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
4581 - (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
4582 +#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
4583 +
4584 +static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index)
4585 +{
4586 + struct ixgbe_ring *rx_ring;
4587 + u32 srrctl;
4588 + int queue0;
4589 + unsigned long mask;
4590 +
4591 + /* we must program one srrctl register per RSS queue since we
4592 + * have enabled RDRXCTL.MVMEN
4593 + */
4594 + mask = (unsigned long)adapter->ring_feature[RING_F_RSS].mask;
4595 + queue0 = index & mask;
4596 + index = index & mask;
4597 +
4598 + rx_ring = &adapter->rx_ring[queue0];
4599 +
4600 + srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index));
4601 +
4602 + srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
4603 + srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
4604 +
4605 + if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
4606 + srrctl |= IXGBE_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
4607 + srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
4608 + srrctl |= ((IXGBE_RX_HDR_SIZE <<
4609 + IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
4610 + IXGBE_SRRCTL_BSIZEHDR_MASK);
4611 + } else {
4612 + srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
4613 +
4614 + if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
4615 + srrctl |= IXGBE_RXBUFFER_2048 >>
4616 + IXGBE_SRRCTL_BSIZEPKT_SHIFT;
4617 + else
4618 + srrctl |= rx_ring->rx_buf_len >>
4619 + IXGBE_SRRCTL_BSIZEPKT_SHIFT;
4620 + }
4621 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl);
4622 +}
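
ixgbe_configure_srrctl() encodes buffer sizes into SRRCTL fields; BSIZEPKT is in 1 KB units, hence the right shift (IXGBE_SRRCTL_BSIZEPKT_SHIFT, assumed to be 10 in this driver generation). The two sizes the one-buffer path can program:

#include <stdio.h>

#define BSIZEPKT_SHIFT 10  /* assumed 1 KB granularity */

int main(void)
{
        printf("2048-byte buffer -> BSIZEPKT %u\n", 2048 >> BSIZEPKT_SHIFT);
        printf("3072-byte buffer -> BSIZEPKT %u\n", 3072 >> BSIZEPKT_SHIFT);
        return 0;
}
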
4623
4624 -#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
4625 +#ifdef CONFIG_IXGBE_LRO
4626 /**
4627 * ixgbe_get_skb_hdr - helper function for LRO header processing
4628 * @skb: pointer to sk_buff to be added to LRO packet
4629 - * @iphdr: pointer to tcp header structure
4630 + * @iphdr: pointer to ip header structure
4631 * @tcph: pointer to tcp header structure
4632 * @hdr_flags: pointer to header flags
4633 * @priv: private data
4634 @@ -1399,8 +1545,8 @@ static int ixgbe_get_skb_hdr(struct sk_b
4635 union ixgbe_adv_rx_desc *rx_desc = priv;
4636
4637 /* Verify that this is a valid IPv4 TCP packet */
4638 - if (!(rx_desc->wb.lower.lo_dword.pkt_info &
4639 - (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)))
4640 + if (!((ixgbe_get_pkt_info(rx_desc) & IXGBE_RXDADV_PKTTYPE_IPV4) &&
4641 + (ixgbe_get_pkt_info(rx_desc) & IXGBE_RXDADV_PKTTYPE_TCP)))
4642 return -1;
4643
4644 /* Set network headers */
4645 @@ -1412,8 +1558,12 @@ static int ixgbe_get_skb_hdr(struct sk_b
4646 return 0;
4647 }
4648
4649 +#endif /* CONFIG_IXGBE_LRO */
4650 +#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
4651 + (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
4652 +
4653 /**
4654 - * ixgbe_configure_rx - Configure 8254x Receive Unit after Reset
4655 + * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
4656 * @adapter: board private structure
4657 *
4658 * Configure the Rx unit of the MAC after a reset.
4659 @@ -1426,25 +1576,26 @@ static void ixgbe_configure_rx(struct ix
4660 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
4661 int i, j;
4662 u32 rdlen, rxctrl, rxcsum;
4663 - u32 random[10];
4664 + static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
4665 + 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
4666 + 0x6A3E67EA, 0x14364D17, 0x3BED200D};
4667 u32 fctrl, hlreg0;
4668 u32 pages;
4669 - u32 reta = 0, mrqc, srrctl;
4670 + u32 reta = 0, mrqc;
4671 + u32 rdrxctl;
4672 + int rx_buf_len;
4673
4674 /* Decide whether to use packet split mode or not */
4675 - if (netdev->mtu > ETH_DATA_LEN)
4676 - adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
4677 - else
4678 - adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
4679 + adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
4680
4681 /* Set the RX buffer length according to the mode */
4682 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
4683 - adapter->rx_buf_len = IXGBE_RX_HDR_SIZE;
4684 + rx_buf_len = IXGBE_RX_HDR_SIZE;
4685 } else {
4686 if (netdev->mtu <= ETH_DATA_LEN)
4687 - adapter->rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
4688 + rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
4689 else
4690 - adapter->rx_buf_len = ALIGN(max_frame, 1024);
4691 + rx_buf_len = ALIGN(max_frame, 1024);
4692 }
4693
4694 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
4695 @@ -1461,28 +1612,6 @@ static void ixgbe_configure_rx(struct ix
4696
4697 pages = PAGE_USE_COUNT(adapter->netdev->mtu);
4698
4699 - srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(0));
4700 - srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
4701 - srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
4702 -
4703 - if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
4704 - srrctl |= PAGE_SIZE >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
4705 - srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
4706 - srrctl |= ((IXGBE_RX_HDR_SIZE <<
4707 - IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
4708 - IXGBE_SRRCTL_BSIZEHDR_MASK);
4709 - } else {
4710 - srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
4711 -
4712 - if (adapter->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
4713 - srrctl |=
4714 - IXGBE_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
4715 - else
4716 - srrctl |=
4717 - adapter->rx_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
4718 - }
4719 - IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(0), srrctl);
4720 -
4721 rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
4722 /* disable receives while setting up the descriptors */
4723 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4724 @@ -1492,25 +1621,45 @@ static void ixgbe_configure_rx(struct ix
4725 * the Base and Length of the Rx Descriptor Ring */
4726 for (i = 0; i < adapter->num_rx_queues; i++) {
4727 rdba = adapter->rx_ring[i].dma;
4728 - IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i), (rdba & DMA_32BIT_MASK));
4729 - IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
4730 - IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i), rdlen);
4731 - IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
4732 - IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
4733 - adapter->rx_ring[i].head = IXGBE_RDH(i);
4734 - adapter->rx_ring[i].tail = IXGBE_RDT(i);
4735 - }
4736 -
4737 - /* Intitial LRO Settings */
4738 - adapter->rx_ring[i].lro_mgr.max_aggr = IXGBE_MAX_LRO_AGGREGATE;
4739 - adapter->rx_ring[i].lro_mgr.max_desc = IXGBE_MAX_LRO_DESCRIPTORS;
4740 - adapter->rx_ring[i].lro_mgr.get_skb_header = ixgbe_get_skb_hdr;
4741 - adapter->rx_ring[i].lro_mgr.features = LRO_F_EXTRACT_VLAN_ID;
4742 - if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
4743 - adapter->rx_ring[i].lro_mgr.features |= LRO_F_NAPI;
4744 - adapter->rx_ring[i].lro_mgr.dev = adapter->netdev;
4745 - adapter->rx_ring[i].lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
4746 - adapter->rx_ring[i].lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
4747 + j = adapter->rx_ring[i].reg_idx;
4748 + IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_32BIT_MASK));
4749 + IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
4750 + IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), rdlen);
4751 + IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
4752 + IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
4753 + adapter->rx_ring[i].head = IXGBE_RDH(j);
4754 + adapter->rx_ring[i].tail = IXGBE_RDT(j);
4755 + adapter->rx_ring[i].rx_buf_len = rx_buf_len;
4756 +#ifdef CONFIG_IXGBE_LRO
4757 +		/* Initial LRO Settings */
4758 + adapter->rx_ring[i].lro_mgr.max_aggr = IXGBE_MAX_LRO_AGGREGATE;
4759 + adapter->rx_ring[i].lro_mgr.max_desc = IXGBE_MAX_LRO_DESCRIPTORS;
4760 + adapter->rx_ring[i].lro_mgr.get_skb_header = ixgbe_get_skb_hdr;
4761 + adapter->rx_ring[i].lro_mgr.features = LRO_F_EXTRACT_VLAN_ID;
4762 + if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
4763 + adapter->rx_ring[i].lro_mgr.features |= LRO_F_NAPI;
4764 + adapter->rx_ring[i].lro_mgr.dev = adapter->netdev;
4765 + adapter->rx_ring[i].lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
4766 + adapter->rx_ring[i].lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
4767 +#endif
4768 +
4769 + ixgbe_configure_srrctl(adapter, j);
4770 + }
4771 +
4772 + /*
4773 + * For VMDq support of different descriptor types or
4774 + * buffer sizes through the use of multiple SRRCTL
4775 + * registers, RDRXCTL.MVMEN must be set to 1
4776 + *
4777 +	 * Also, the manual doesn't mention it clearly, but DCA hints
4778 +	 * will only use queue 0's tags unless this bit is set. The only
4779 +	 * side effect of setting this bit is that SRRCTL must be
4780 +	 * fully programmed [0..15]
4781 + */
4782 + rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
4783 + rdrxctl |= IXGBE_RDRXCTL_MVMEN;
4784 + IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
4785 +
4786
4787 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
4788 /* Fill out redirection table */
4789 @@ -1525,22 +1674,20 @@ static void ixgbe_configure_rx(struct ix
4790 }
4791
4792 /* Fill out hash function seeds */
4793 - /* XXX use a random constant here to glue certain flows */
4794 - get_random_bytes(&random[0], 40);
4795 for (i = 0; i < 10; i++)
4796 - IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random[i]);
4797 + IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);
4798
4799 mrqc = IXGBE_MRQC_RSSEN
4800 /* Perform hash on these packet types */
4801 - | IXGBE_MRQC_RSS_FIELD_IPV4
4802 - | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
4803 - | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
4804 - | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
4805 - | IXGBE_MRQC_RSS_FIELD_IPV6_EX
4806 - | IXGBE_MRQC_RSS_FIELD_IPV6
4807 - | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
4808 - | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
4809 - | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
4810 + | IXGBE_MRQC_RSS_FIELD_IPV4
4811 + | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
4812 + | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
4813 + | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
4814 + | IXGBE_MRQC_RSS_FIELD_IPV6_EX
4815 + | IXGBE_MRQC_RSS_FIELD_IPV6
4816 + | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
4817 + | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
4818 + | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
4819 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
4820 }
4821
4822 @@ -1562,7 +1709,7 @@ static void ixgbe_configure_rx(struct ix
4823 }
4824
4825 static void ixgbe_vlan_rx_register(struct net_device *netdev,
4826 - struct vlan_group *grp)
4827 + struct vlan_group *grp)
4828 {
4829 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4830 u32 ctrl;
4831 @@ -1586,14 +1733,16 @@ static void ixgbe_vlan_rx_register(struc
4832 static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
4833 {
4834 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4835 + struct ixgbe_hw *hw = &adapter->hw;
4836
4837 /* add VID to filter table */
4838 - ixgbe_set_vfta(&adapter->hw, vid, 0, true);
4839 + hw->mac.ops.set_vfta(&adapter->hw, vid, 0, true);
4840 }
4841
4842 static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
4843 {
4844 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4845 + struct ixgbe_hw *hw = &adapter->hw;
4846
4847 if (!test_bit(__IXGBE_DOWN, &adapter->state))
4848 ixgbe_irq_disable(adapter);
4849 @@ -1604,7 +1753,7 @@ static void ixgbe_vlan_rx_kill_vid(struc
4850 ixgbe_irq_enable(adapter);
4851
4852 /* remove VID from filter table */
4853 - ixgbe_set_vfta(&adapter->hw, vid, 0, false);
4854 + hw->mac.ops.set_vfta(&adapter->hw, vid, 0, false);
4855 }
4856
4857 static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
4858 @@ -1621,23 +1770,37 @@ static void ixgbe_restore_vlan(struct ix
4859 }
4860 }
4861
4862 +static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
4863 +{
4864 + struct dev_mc_list *mc_ptr;
4865 + u8 *addr = *mc_addr_ptr;
4866 + *vmdq = 0;
4867 +
4868 + mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
4869 + if (mc_ptr->next)
4870 + *mc_addr_ptr = mc_ptr->next->dmi_addr;
4871 + else
4872 + *mc_addr_ptr = NULL;
4873 +
4874 + return addr;
4875 +}
4876 +
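The new ixgbe_addr_list_itr above leans on container_of: given only a pointer to the dmi_addr member, it recovers the enclosing dev_mc_list and steps to the next node. The same pointer arithmetic in a freestanding sketch (the struct and names are invented for illustration):

    #include <stddef.h>
    #include <assert.h>

    /* Minimal stand-in for the kernel macro: member pointer -> enclosing struct. */
    #define sketch_container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct mc_node {
            struct mc_node *next;
            unsigned char addr[6];     /* plays the role of dmi_addr */
    };

    int main(void)
    {
            struct mc_node node = { .next = NULL, .addr = {1, 2, 3, 4, 5, 6} };
            unsigned char *p = node.addr;                 /* only the member pointer */
            struct mc_node *back = sketch_container_of(p, struct mc_node, addr);

            assert(back == &node);     /* recovered the enclosing structure */
            return 0;
    }
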
4877 /**
4878 - * ixgbe_set_multi - Multicast and Promiscuous mode set
4879 + * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
4880 * @netdev: network interface device structure
4881 *
4882 - * The set_multi entry point is called whenever the multicast address
4883 - * list or the network interface flags are updated. This routine is
4884 - * responsible for configuring the hardware for proper multicast,
4885 - * promiscuous mode, and all-multi behavior.
4886 + * The set_rx_mode entry point is called whenever the unicast/multicast
4887 + * address list or the network interface flags are updated. This routine is
4888 + * responsible for configuring the hardware for proper unicast, multicast and
4889 + * promiscuous mode.
4890 **/
4891 -static void ixgbe_set_multi(struct net_device *netdev)
4892 +static void ixgbe_set_rx_mode(struct net_device *netdev)
4893 {
4894 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4895 struct ixgbe_hw *hw = &adapter->hw;
4896 - struct dev_mc_list *mc_ptr;
4897 - u8 *mta_list;
4898 u32 fctrl, vlnctrl;
4899 - int i;
4900 + u8 *addr_list = NULL;
4901 + int addr_count = 0;
4902
4903 /* Check for Promiscuous and All Multicast modes */
4904
4905 @@ -1645,6 +1808,7 @@ static void ixgbe_set_multi(struct net_d
4906 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4907
4908 if (netdev->flags & IFF_PROMISC) {
4909 + hw->addr_ctrl.user_set_promisc = 1;
4910 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4911 vlnctrl &= ~IXGBE_VLNCTRL_VFE;
4912 } else {
4913 @@ -1655,33 +1819,25 @@ static void ixgbe_set_multi(struct net_d
4914 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4915 }
4916 vlnctrl |= IXGBE_VLNCTRL_VFE;
4917 + hw->addr_ctrl.user_set_promisc = 0;
4918 }
4919
4920 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4921 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4922
4923 - if (netdev->mc_count) {
4924 - mta_list = kcalloc(netdev->mc_count, ETH_ALEN, GFP_ATOMIC);
4925 - if (!mta_list)
4926 - return;
4927 -
4928 - /* Shared function expects packed array of only addresses. */
4929 - mc_ptr = netdev->mc_list;
4930 -
4931 - for (i = 0; i < netdev->mc_count; i++) {
4932 - if (!mc_ptr)
4933 - break;
4934 - memcpy(mta_list + (i * ETH_ALEN), mc_ptr->dmi_addr,
4935 - ETH_ALEN);
4936 - mc_ptr = mc_ptr->next;
4937 - }
4938 -
4939 - ixgbe_update_mc_addr_list(hw, mta_list, i, 0);
4940 - kfree(mta_list);
4941 - } else {
4942 - ixgbe_update_mc_addr_list(hw, NULL, 0, 0);
4943 - }
4944 -
4945 + /* reprogram secondary unicast list */
4946 + addr_count = netdev->uc_count;
4947 + if (addr_count)
4948 + addr_list = netdev->uc_list->dmi_addr;
4949 + hw->mac.ops.update_uc_addr_list(hw, addr_list, addr_count,
4950 + ixgbe_addr_list_itr);
4951 +
4952 + /* reprogram multicast list */
4953 + addr_count = netdev->mc_count;
4954 + if (addr_count)
4955 + addr_list = netdev->mc_list->dmi_addr;
4956 + hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
4957 + ixgbe_addr_list_itr);
4958 }
4959
4960 static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
4961 @@ -1695,10 +1851,16 @@ static void ixgbe_napi_enable_all(struct
4962 q_vectors = 1;
4963
4964 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
4965 + struct napi_struct *napi;
4966 q_vector = &adapter->q_vector[q_idx];
4967 if (!q_vector->rxr_count)
4968 continue;
4969 - napi_enable(&q_vector->napi);
4970 + napi = &q_vector->napi;
4971 + if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) &&
4972 + (q_vector->rxr_count > 1))
4973 + napi->poll = &ixgbe_clean_rxonly_many;
4974 +
4975 + napi_enable(napi);
4976 }
4977 }
4978
4979 @@ -1725,7 +1887,7 @@ static void ixgbe_configure(struct ixgbe
4980 struct net_device *netdev = adapter->netdev;
4981 int i;
4982
4983 - ixgbe_set_multi(netdev);
4984 + ixgbe_set_rx_mode(netdev);
4985
4986 ixgbe_restore_vlan(adapter);
4987
4988 @@ -1733,7 +1895,7 @@ static void ixgbe_configure(struct ixgbe
4989 ixgbe_configure_rx(adapter);
4990 for (i = 0; i < adapter->num_rx_queues; i++)
4991 ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i],
4992 - (adapter->rx_ring[i].count - 1));
4993 + (adapter->rx_ring[i].count - 1));
4994 }
4995
4996 static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
4997 @@ -1751,7 +1913,7 @@ static int ixgbe_up_complete(struct ixgb
4998 (adapter->flags & IXGBE_FLAG_MSI_ENABLED)) {
4999 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
5000 gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME |
5001 - IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD);
5002 + IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD);
5003 } else {
5004 /* MSI only */
5005 gpie = 0;
5006 @@ -1778,6 +1940,8 @@ static int ixgbe_up_complete(struct ixgb
5007 for (i = 0; i < adapter->num_tx_queues; i++) {
5008 j = adapter->tx_ring[i].reg_idx;
5009 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
5010 + /* enable WTHRESH=8 descriptors, to encourage burst writeback */
5011 + txdctl |= (8 << 16);
5012 txdctl |= IXGBE_TXDCTL_ENABLE;
5013 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
5014 }
5015 @@ -1812,6 +1976,8 @@ static int ixgbe_up_complete(struct ixgb
5016
5017 /* bring the link up in the watchdog, this could race with our first
5018 * link up interrupt but shouldn't be a problem */
5019 + adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
5020 + adapter->link_check_timeout = jiffies;
5021 mod_timer(&adapter->watchdog_timer, jiffies);
5022 return 0;
5023 }
5024 @@ -1836,50 +2002,14 @@ int ixgbe_up(struct ixgbe_adapter *adapt
5025
5026 void ixgbe_reset(struct ixgbe_adapter *adapter)
5027 {
5028 - if (ixgbe_init_hw(&adapter->hw))
5029 - DPRINTK(PROBE, ERR, "Hardware Error\n");
5030 + struct ixgbe_hw *hw = &adapter->hw;
5031 + if (hw->mac.ops.init_hw(hw))
5032 + dev_err(&adapter->pdev->dev, "Hardware Error\n");
5033
5034 /* reprogram the RAR[0] in case user changed it. */
5035 - ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
5036 -
5037 -}
5038 -
5039 -#ifdef CONFIG_PM
5040 -static int ixgbe_resume(struct pci_dev *pdev)
5041 -{
5042 - struct net_device *netdev = pci_get_drvdata(pdev);
5043 - struct ixgbe_adapter *adapter = netdev_priv(netdev);
5044 - u32 err;
5045 -
5046 - pci_set_power_state(pdev, PCI_D0);
5047 - pci_restore_state(pdev);
5048 - err = pci_enable_device(pdev);
5049 - if (err) {
5050 - printk(KERN_ERR "ixgbe: Cannot enable PCI device from " \
5051 - "suspend\n");
5052 - return err;
5053 - }
5054 - pci_set_master(pdev);
5055 -
5056 - pci_enable_wake(pdev, PCI_D3hot, 0);
5057 - pci_enable_wake(pdev, PCI_D3cold, 0);
5058 -
5059 - if (netif_running(netdev)) {
5060 - err = ixgbe_request_irq(adapter);
5061 - if (err)
5062 - return err;
5063 - }
5064 -
5065 - ixgbe_reset(adapter);
5066 -
5067 - if (netif_running(netdev))
5068 - ixgbe_up(adapter);
5069 -
5070 - netif_device_attach(netdev);
5071 + hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
5072
5073 - return 0;
5074 }
5075 -#endif
5076
5077 /**
5078 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
5079 @@ -1887,7 +2017,7 @@ static int ixgbe_resume(struct pci_dev *
5080 * @rx_ring: ring to free buffers from
5081 **/
5082 static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
5083 - struct ixgbe_ring *rx_ring)
5084 + struct ixgbe_ring *rx_ring)
5085 {
5086 struct pci_dev *pdev = adapter->pdev;
5087 unsigned long size;
5088 @@ -1901,8 +2031,8 @@ static void ixgbe_clean_rx_ring(struct i
5089 rx_buffer_info = &rx_ring->rx_buffer_info[i];
5090 if (rx_buffer_info->dma) {
5091 pci_unmap_single(pdev, rx_buffer_info->dma,
5092 - adapter->rx_buf_len,
5093 - PCI_DMA_FROMDEVICE);
5094 + rx_ring->rx_buf_len,
5095 + PCI_DMA_FROMDEVICE);
5096 rx_buffer_info->dma = 0;
5097 }
5098 if (rx_buffer_info->skb) {
5099 @@ -1911,12 +2041,12 @@ static void ixgbe_clean_rx_ring(struct i
5100 }
5101 if (!rx_buffer_info->page)
5102 continue;
5103 - pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE,
5104 - PCI_DMA_FROMDEVICE);
5105 + pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2,
5106 + PCI_DMA_FROMDEVICE);
5107 rx_buffer_info->page_dma = 0;
5108 -
5109 put_page(rx_buffer_info->page);
5110 rx_buffer_info->page = NULL;
5111 + rx_buffer_info->page_offset = 0;
5112 }
5113
5114 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
5115 @@ -1938,7 +2068,7 @@ static void ixgbe_clean_rx_ring(struct i
5116 * @tx_ring: ring to be cleaned
5117 **/
5118 static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
5119 - struct ixgbe_ring *tx_ring)
5120 + struct ixgbe_ring *tx_ring)
5121 {
5122 struct ixgbe_tx_buffer *tx_buffer_info;
5123 unsigned long size;
5124 @@ -1991,96 +2121,85 @@ static void ixgbe_clean_all_tx_rings(str
5125 void ixgbe_down(struct ixgbe_adapter *adapter)
5126 {
5127 struct net_device *netdev = adapter->netdev;
5128 + struct ixgbe_hw *hw = &adapter->hw;
5129 u32 rxctrl;
5130 + u32 txdctl;
5131 + int i, j;
5132
5133 /* signal that we are down to the interrupt handler */
5134 set_bit(__IXGBE_DOWN, &adapter->state);
5135
5136 /* disable receives */
5137 - rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
5138 - IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL,
5139 - rxctrl & ~IXGBE_RXCTRL_RXEN);
5140 + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
5141 + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
5142
5143 netif_tx_disable(netdev);
5144
5145 - /* disable transmits in the hardware */
5146 -
5147 - /* flush both disables */
5148 - IXGBE_WRITE_FLUSH(&adapter->hw);
5149 + IXGBE_WRITE_FLUSH(hw);
5150 msleep(10);
5151
5152 + netif_tx_stop_all_queues(netdev);
5153 +
5154 ixgbe_irq_disable(adapter);
5155
5156 ixgbe_napi_disable_all(adapter);
5157 +
5158 del_timer_sync(&adapter->watchdog_timer);
5159 + cancel_work_sync(&adapter->watchdog_task);
5160 +
5161 + /* disable transmits in the hardware now that interrupts are off */
5162 + for (i = 0; i < adapter->num_tx_queues; i++) {
5163 + j = adapter->tx_ring[i].reg_idx;
5164 + txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
5165 + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
5166 + (txdctl & ~IXGBE_TXDCTL_ENABLE));
5167 + }
5168
5169 netif_carrier_off(netdev);
5170 - netif_tx_stop_all_queues(netdev);
5171
5172 +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
5173 + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
5174 + adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
5175 + dca_remove_requester(&adapter->pdev->dev);
5176 + }
5177 +
5178 +#endif
5179 if (!pci_channel_offline(adapter->pdev))
5180 ixgbe_reset(adapter);
5181 ixgbe_clean_all_tx_rings(adapter);
5182 ixgbe_clean_all_rx_rings(adapter);
5183
5184 +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
5185 + /* since we reset the hardware DCA settings were cleared */
5186 + if (dca_add_requester(&adapter->pdev->dev) == 0) {
5187 + adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
5188 + /* always use CB2 mode, difference is masked
5189 + * in the CB driver */
5190 + IXGBE_WRITE_REG(hw, IXGBE_DCA_CTRL, 2);
5191 + ixgbe_setup_dca(adapter);
5192 + }
5193 +#endif
5194 }
5195
5196 -static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
5197 +/**
5198 + * ixgbe_poll - NAPI Rx polling callback
5199 + * @napi: structure for representing this polling device
5200 + * @budget: how many packets driver is allowed to clean
5201 + *
5202 + * This function is used for legacy and MSI interrupts, in NAPI mode
5203 + **/
5204 +static int ixgbe_poll(struct napi_struct *napi, int budget)
5205 {
5206 - struct net_device *netdev = pci_get_drvdata(pdev);
5207 - struct ixgbe_adapter *adapter = netdev_priv(netdev);
5208 -#ifdef CONFIG_PM
5209 - int retval = 0;
5210 -#endif
5211 -
5212 - netif_device_detach(netdev);
5213 -
5214 - if (netif_running(netdev)) {
5215 - ixgbe_down(adapter);
5216 - ixgbe_free_irq(adapter);
5217 - }
5218 -
5219 -#ifdef CONFIG_PM
5220 - retval = pci_save_state(pdev);
5221 - if (retval)
5222 - return retval;
5223 -#endif
5224 -
5225 - pci_enable_wake(pdev, PCI_D3hot, 0);
5226 - pci_enable_wake(pdev, PCI_D3cold, 0);
5227 -
5228 - ixgbe_release_hw_control(adapter);
5229 -
5230 - pci_disable_device(pdev);
5231 -
5232 - pci_set_power_state(pdev, pci_choose_state(pdev, state));
5233 -
5234 - return 0;
5235 -}
5236 -
5237 -static void ixgbe_shutdown(struct pci_dev *pdev)
5238 -{
5239 - ixgbe_suspend(pdev, PMSG_SUSPEND);
5240 -}
5241 -
5242 -/**
5243 - * ixgbe_poll - NAPI Rx polling callback
5244 - * @napi: structure for representing this polling device
5245 - * @budget: how many packets driver is allowed to clean
5246 - *
5247 - * This function is used for legacy and MSI, NAPI mode
5248 - **/
5249 -static int ixgbe_poll(struct napi_struct *napi, int budget)
5250 -{
5251 - struct ixgbe_q_vector *q_vector = container_of(napi,
5252 - struct ixgbe_q_vector, napi);
5253 - struct ixgbe_adapter *adapter = q_vector->adapter;
5254 - int tx_cleaned = 0, work_done = 0;
5255 -
5256 -#ifdef CONFIG_DCA
5257 - if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
5258 - ixgbe_update_tx_dca(adapter, adapter->tx_ring);
5259 - ixgbe_update_rx_dca(adapter, adapter->rx_ring);
5260 - }
5261 + struct ixgbe_q_vector *q_vector = container_of(napi,
5262 + struct ixgbe_q_vector, napi);
5263 + struct ixgbe_adapter *adapter = q_vector->adapter;
5264 + int tx_cleaned, work_done = 0;
5265 +
5266 +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
5267 + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
5268 + ixgbe_update_tx_dca(adapter, adapter->tx_ring);
5269 + ixgbe_update_rx_dca(adapter, adapter->rx_ring);
5270 + }
5271 #endif
5272
5273 tx_cleaned = ixgbe_clean_tx_irq(adapter, adapter->tx_ring);
5274 @@ -2092,12 +2211,11 @@ static int ixgbe_poll(struct napi_struct
5275 /* If budget not fully consumed, exit the polling mode */
5276 if (work_done < budget) {
5277 netif_rx_complete(adapter->netdev, napi);
5278 - if (adapter->rx_eitr < IXGBE_MIN_ITR_USECS)
5279 + if (adapter->itr_setting & 3)
5280 ixgbe_set_itr(adapter);
5281 if (!test_bit(__IXGBE_DOWN, &adapter->state))
5282 ixgbe_irq_enable(adapter);
5283 }
5284 -
5285 return work_done;
5286 }
5287
5288 @@ -2123,8 +2241,48 @@ static void ixgbe_reset_task(struct work
5289 ixgbe_reinit_locked(adapter);
5290 }
5291
5292 +static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
5293 +{
5294 + int nrq = 1, ntq = 1;
5295 + int feature_mask = 0, rss_i, rss_m;
5296 +
5297 + /* Number of supported queues */
5298 + switch (adapter->hw.mac.type) {
5299 + case ixgbe_mac_82598EB:
5300 + rss_i = adapter->ring_feature[RING_F_RSS].indices;
5301 + rss_m = 0;
5302 + feature_mask |= IXGBE_FLAG_RSS_ENABLED;
5303 +
5304 + switch (adapter->flags & feature_mask) {
5305 + case (IXGBE_FLAG_RSS_ENABLED):
5306 + rss_m = 0xF;
5307 + nrq = rss_i;
5308 + ntq = rss_i;
5309 + break;
5310 + case 0:
5311 + default:
5312 + rss_i = 0;
5313 + rss_m = 0;
5314 + nrq = 1;
5315 + ntq = 1;
5316 + break;
5317 + }
5318 +
5319 + adapter->ring_feature[RING_F_RSS].indices = rss_i;
5320 + adapter->ring_feature[RING_F_RSS].mask = rss_m;
5321 + break;
5322 + default:
5323 + nrq = 1;
5324 + ntq = 1;
5325 + break;
5326 + }
5327 +
5328 + adapter->num_rx_queues = nrq;
5329 + adapter->num_tx_queues = ntq;
5330 +}
5331 +
5332 static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
5333 - int vectors)
5334 + int vectors)
5335 {
5336 int err, vector_threshold;
5337
5338 @@ -2143,7 +2301,7 @@ static void ixgbe_acquire_msix_vectors(s
5339 */
5340 while (vectors >= vector_threshold) {
5341 err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
5342 - vectors);
5343 + vectors);
5344 if (!err) /* Success in acquiring all requested vectors. */
5345 break;
5346 else if (err < 0)
5347 @@ -2162,54 +2320,13 @@ static void ixgbe_acquire_msix_vectors(s
5348 kfree(adapter->msix_entries);
5349 adapter->msix_entries = NULL;
5350 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
5351 - adapter->num_tx_queues = 1;
5352 - adapter->num_rx_queues = 1;
5353 + ixgbe_set_num_queues(adapter);
5354 } else {
5355 adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
5356 adapter->num_msix_vectors = vectors;
5357 }
5358 }
5359
5360 -static void __devinit ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
5361 -{
5362 - int nrq, ntq;
5363 - int feature_mask = 0, rss_i, rss_m;
5364 -
5365 - /* Number of supported queues */
5366 - switch (adapter->hw.mac.type) {
5367 - case ixgbe_mac_82598EB:
5368 - rss_i = adapter->ring_feature[RING_F_RSS].indices;
5369 - rss_m = 0;
5370 - feature_mask |= IXGBE_FLAG_RSS_ENABLED;
5371 -
5372 - switch (adapter->flags & feature_mask) {
5373 - case (IXGBE_FLAG_RSS_ENABLED):
5374 - rss_m = 0xF;
5375 - nrq = rss_i;
5376 - ntq = rss_i;
5377 - break;
5378 - case 0:
5379 - default:
5380 - rss_i = 0;
5381 - rss_m = 0;
5382 - nrq = 1;
5383 - ntq = 1;
5384 - break;
5385 - }
5386 -
5387 - adapter->ring_feature[RING_F_RSS].indices = rss_i;
5388 - adapter->ring_feature[RING_F_RSS].mask = rss_m;
5389 - break;
5390 - default:
5391 - nrq = 1;
5392 - ntq = 1;
5393 - break;
5394 - }
5395 -
5396 - adapter->num_rx_queues = nrq;
5397 - adapter->num_tx_queues = ntq;
5398 -}
5399 -
5400 /**
5401 * ixgbe_cache_ring_register - Descriptor ring to register mapping
5402 * @adapter: board private structure to initialize
5403 @@ -2219,9 +2336,6 @@ static void __devinit ixgbe_set_num_queu
5404 **/
5405 static void __devinit ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
5406 {
5407 - /* TODO: Remove all uses of the indices in the cases where multiple
5408 - * features are OR'd together, if the feature set makes sense.
5409 - */
5410 int feature_mask = 0, rss_i;
5411 int i, txr_idx, rxr_idx;
5412
5413 @@ -2262,21 +2376,22 @@ static int __devinit ixgbe_alloc_queues(
5414 int i;
5415
5416 adapter->tx_ring = kcalloc(adapter->num_tx_queues,
5417 - sizeof(struct ixgbe_ring), GFP_KERNEL);
5418 + sizeof(struct ixgbe_ring), GFP_KERNEL);
5419 if (!adapter->tx_ring)
5420 goto err_tx_ring_allocation;
5421
5422 adapter->rx_ring = kcalloc(adapter->num_rx_queues,
5423 - sizeof(struct ixgbe_ring), GFP_KERNEL);
5424 + sizeof(struct ixgbe_ring), GFP_KERNEL);
5425 if (!adapter->rx_ring)
5426 goto err_rx_ring_allocation;
5427
5428 for (i = 0; i < adapter->num_tx_queues; i++) {
5429 - adapter->tx_ring[i].count = IXGBE_DEFAULT_TXD;
5430 + adapter->tx_ring[i].count = adapter->tx_ring_count;
5431 adapter->tx_ring[i].queue_index = i;
5432 }
5433 +
5434 for (i = 0; i < adapter->num_rx_queues; i++) {
5435 - adapter->rx_ring[i].count = IXGBE_DEFAULT_RXD;
5436 + adapter->rx_ring[i].count = adapter->rx_ring_count;
5437 adapter->rx_ring[i].queue_index = i;
5438 }
5439
5440 @@ -2298,25 +2413,19 @@ err_tx_ring_allocation:
5441 * capabilities of the hardware and the kernel.
5442 **/
5443 static int __devinit ixgbe_set_interrupt_capability(struct ixgbe_adapter
5444 - *adapter)
5445 + *adapter)
5446 {
5447 int err = 0;
5448 int vector, v_budget;
5449
5450 /*
5451 - * Set the default interrupt throttle rate.
5452 - */
5453 - adapter->rx_eitr = (1000000 / IXGBE_DEFAULT_ITR_RX_USECS);
5454 - adapter->tx_eitr = (1000000 / IXGBE_DEFAULT_ITR_TX_USECS);
5455 -
5456 - /*
5457 * It's easy to be greedy for MSI-X vectors, but it really
5458 * doesn't do us much good if we have a lot more vectors
5459 * than CPU's. So let's be conservative and only ask for
5460 * (roughly) twice the number of vectors as there are CPU's.
5461 */
5462 v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
5463 - (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;
5464 + (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;
5465
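Worked through as plain arithmetic, the budget above is the smaller of the combined queue count and twice the CPU count, plus the non-queue vectors. A sketch with NON_Q_VECTORS assumed to be 1:

    #include <assert.h>

    #define NON_Q_VECTORS 1     /* assumed value for the example */

    static int min_int(int a, int b) { return a < b ? a : b; }

    /* Mirror of the budget computation: queue pairs vs. 2x CPUs, plus extras. */
    static int vector_budget(int rx_queues, int tx_queues, int online_cpus)
    {
            return min_int(rx_queues + tx_queues, online_cpus * 2) + NON_Q_VECTORS;
    }

    int main(void)
    {
            assert(vector_budget(8, 8, 4) == 9);    /* CPU-bound: 4*2 + 1 */
            assert(vector_budget(2, 2, 16) == 5);   /* queue-bound: 2+2 + 1 */
            return 0;
    }
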
5466 /*
5467 * At the same time, hardware can only support a maximum of
5468 @@ -2330,7 +2439,7 @@ static int __devinit ixgbe_set_interrupt
5469 /* A failure in MSI-X entry allocation isn't fatal, but it does
5470 * mean we disable MSI-X capabilities of the adapter. */
5471 adapter->msix_entries = kcalloc(v_budget,
5472 - sizeof(struct msix_entry), GFP_KERNEL);
5473 + sizeof(struct msix_entry), GFP_KERNEL);
5474 if (!adapter->msix_entries) {
5475 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
5476 ixgbe_set_num_queues(adapter);
5477 @@ -2339,7 +2448,7 @@ static int __devinit ixgbe_set_interrupt
5478 err = ixgbe_alloc_queues(adapter);
5479 if (err) {
5480 DPRINTK(PROBE, ERR, "Unable to allocate memory "
5481 - "for queues\n");
5482 + "for queues\n");
5483 goto out;
5484 }
5485
5486 @@ -2360,7 +2469,7 @@ try_msi:
5487 adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
5488 } else {
5489 DPRINTK(HW, DEBUG, "Unable to allocate MSI interrupt, "
5490 - "falling back to legacy. Error: %d\n", err);
5491 + "falling back to legacy. Error: %d\n", err);
5492 /* reset err */
5493 err = 0;
5494 }
5495 @@ -2416,9 +2525,9 @@ static int __devinit ixgbe_init_interrup
5496 }
5497
5498 DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, "
5499 - "Tx Queue count = %u\n",
5500 - (adapter->num_rx_queues > 1) ? "Enabled" :
5501 - "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
5502 + "Tx Queue count = %u\n",
5503 + (adapter->num_rx_queues > 1) ? "Enabled" :
5504 + "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
5505
5506 set_bit(__IXGBE_DOWN, &adapter->state);
5507
5508 @@ -2445,33 +2554,44 @@ static int __devinit ixgbe_sw_init(struc
5509 struct pci_dev *pdev = adapter->pdev;
5510 unsigned int rss;
5511
5512 + /* PCI config space info */
5513 +
5514 + hw->vendor_id = pdev->vendor;
5515 + hw->device_id = pdev->device;
5516 + hw->revision_id = pdev->revision;
5517 + hw->subsystem_vendor_id = pdev->subsystem_vendor;
5518 + hw->subsystem_device_id = pdev->subsystem_device;
5519 +
5520 /* Set capability flags */
5521 rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
5522 adapter->ring_feature[RING_F_RSS].indices = rss;
5523 adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
5524
5525 - /* Enable Dynamic interrupt throttling by default */
5526 - adapter->rx_eitr = 1;
5527 - adapter->tx_eitr = 1;
5528 -
5529 /* default flow control settings */
5530 - hw->fc.original_type = ixgbe_fc_full;
5531 - hw->fc.type = ixgbe_fc_full;
5532 + hw->fc.original_type = ixgbe_fc_none;
5533 + hw->fc.type = ixgbe_fc_none;
5534 + hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
5535 + hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
5536 + hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
5537 + hw->fc.send_xon = true;
5538
5539 /* select 10G link by default */
5540 hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN;
5541 - if (hw->mac.ops.reset(hw)) {
5542 - dev_err(&pdev->dev, "HW Init failed\n");
5543 - return -EIO;
5544 - }
5545 - if (hw->mac.ops.setup_link_speed(hw, IXGBE_LINK_SPEED_10GB_FULL, true,
5546 - false)) {
5547 - dev_err(&pdev->dev, "Link Speed setup failed\n");
5548 - return -EIO;
5549 - }
5550 +
5551 + /* enable itr by default in dynamic mode */
5552 + adapter->itr_setting = 1;
5553 + adapter->eitr_param = 20000;
5554 +
5555 + /* set defaults for eitr in MegaBytes */
5556 + adapter->eitr_low = 10;
5557 + adapter->eitr_high = 20;
5558 +
5559 + /* set default ring sizes */
5560 + adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
5561 + adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
5562
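The eitr_param default of 20000 is an interrupt rate in events per second; the removed code expressed the same throttle as a microsecond interval via 1000000/usecs, and the two forms are reciprocal. The round trip as a sketch (function names invented):

    #include <assert.h>

    /* Interrupt throttling: interrupts/second <-> interval in microseconds. */
    static unsigned rate_to_interval_usecs(unsigned ints_per_sec)
    {
            return 1000000u / ints_per_sec;
    }

    static unsigned interval_to_rate(unsigned usecs)
    {
            return 1000000u / usecs;
    }

    int main(void)
    {
            /* 20000 interrupts/s is a 50 us interval between interrupts. */
            assert(rate_to_interval_usecs(20000) == 50);
            assert(interval_to_rate(50) == 20000);
            return 0;
    }
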
5563 /* initialize eeprom parameters */
5564 - if (ixgbe_init_eeprom(hw)) {
5565 + if (ixgbe_init_eeprom_params_generic(hw)) {
5566 dev_err(&pdev->dev, "EEPROM initialization failed\n");
5567 return -EIO;
5568 }
5569 @@ -2487,105 +2607,160 @@ static int __devinit ixgbe_sw_init(struc
5570 /**
5571 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
5572 * @adapter: board private structure
5573 - * @txdr: tx descriptor ring (for a specific queue) to setup
5574 + * @tx_ring: tx descriptor ring (for a specific queue) to setup
5575 *
5576 * Return 0 on success, negative on failure
5577 **/
5578 int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
5579 - struct ixgbe_ring *txdr)
5580 + struct ixgbe_ring *tx_ring)
5581 {
5582 struct pci_dev *pdev = adapter->pdev;
5583 int size;
5584
5585 - size = sizeof(struct ixgbe_tx_buffer) * txdr->count;
5586 - txdr->tx_buffer_info = vmalloc(size);
5587 - if (!txdr->tx_buffer_info) {
5588 - DPRINTK(PROBE, ERR,
5589 - "Unable to allocate memory for the transmit descriptor ring\n");
5590 - return -ENOMEM;
5591 - }
5592 - memset(txdr->tx_buffer_info, 0, size);
5593 + size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
5594 + tx_ring->tx_buffer_info = vmalloc(size);
5595 + if (!tx_ring->tx_buffer_info)
5596 + goto err;
5597 + memset(tx_ring->tx_buffer_info, 0, size);
5598
5599 /* round up to nearest 4K */
5600 - txdr->size = txdr->count * sizeof(union ixgbe_adv_tx_desc);
5601 - txdr->size = ALIGN(txdr->size, 4096);
5602 + tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc) +
5603 + sizeof(u32);
5604 + tx_ring->size = ALIGN(tx_ring->size, 4096);
5605 +
5606 + tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
5607 + &tx_ring->dma);
5608 + if (!tx_ring->desc)
5609 + goto err;
5610
5611 - txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
5612 - if (!txdr->desc) {
5613 - vfree(txdr->tx_buffer_info);
5614 - DPRINTK(PROBE, ERR,
5615 - "Memory allocation failed for the tx desc ring\n");
5616 - return -ENOMEM;
5617 - }
5618 + tx_ring->next_to_use = 0;
5619 + tx_ring->next_to_clean = 0;
5620 + tx_ring->work_limit = tx_ring->count;
5621 + return 0;
5622
5623 - txdr->next_to_use = 0;
5624 - txdr->next_to_clean = 0;
5625 - txdr->work_limit = txdr->count;
5626 +err:
5627 + vfree(tx_ring->tx_buffer_info);
5628 + tx_ring->tx_buffer_info = NULL;
5629 + DPRINTK(PROBE, ERR, "Unable to allocate memory for the transmit "
5630 + "descriptor ring\n");
5631 + return -ENOMEM;
5632 +}
5633
5634 - return 0;
5635 +/**
5636 + * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
5637 + * @adapter: board private structure
5638 + *
5639 + * If this function returns with an error, then it's possible one or
5640 + * more of the rings is populated (while the rest are not). It is the
5641 + * caller's duty to clean those orphaned rings.
5642 + *
5643 + * Return 0 on success, negative on failure
5644 + **/
5645 +static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
5646 +{
5647 + int i, err = 0;
5648 +
5649 + for (i = 0; i < adapter->num_tx_queues; i++) {
5650 + err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
5651 + if (!err)
5652 + continue;
5653 + DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i);
5654 + break;
5655 + }
5656 +
5657 + return err;
5658 }
5659
5660 /**
5661 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
5662 * @adapter: board private structure
5663 - * @rxdr: rx descriptor ring (for a specific queue) to setup
5664 + * @rx_ring: rx descriptor ring (for a specific queue) to setup
5665 *
5666 * Returns 0 on success, negative on failure
5667 **/
5668 int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
5669 - struct ixgbe_ring *rxdr)
5670 + struct ixgbe_ring *rx_ring)
5671 {
5672 struct pci_dev *pdev = adapter->pdev;
5673 int size;
5674
5675 +#ifdef CONFIG_IXGBE_LRO
5676 size = sizeof(struct net_lro_desc) * IXGBE_MAX_LRO_DESCRIPTORS;
5677 - rxdr->lro_mgr.lro_arr = vmalloc(size);
5678 - if (!rxdr->lro_mgr.lro_arr)
5679 + rx_ring->lro_mgr.lro_arr = vmalloc(size);
5680 + if (!rx_ring->lro_mgr.lro_arr)
5681 return -ENOMEM;
5682 - memset(rxdr->lro_mgr.lro_arr, 0, size);
5683 -
5684 - size = sizeof(struct ixgbe_rx_buffer) * rxdr->count;
5685 - rxdr->rx_buffer_info = vmalloc(size);
5686 - if (!rxdr->rx_buffer_info) {
5687 + memset(rx_ring->lro_mgr.lro_arr, 0, size);
5688 +#endif
5689 + size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
5690 + rx_ring->rx_buffer_info = vmalloc(size);
5691 + if (!rx_ring->rx_buffer_info) {
5692 DPRINTK(PROBE, ERR,
5693 - "vmalloc allocation failed for the rx desc ring\n");
5694 + "vmalloc allocation failed for the rx desc ring\n");
5695 goto alloc_failed;
5696 }
5697 - memset(rxdr->rx_buffer_info, 0, size);
5698 + memset(rx_ring->rx_buffer_info, 0, size);
5699
5700 /* Round up to nearest 4K */
5701 - rxdr->size = rxdr->count * sizeof(union ixgbe_adv_rx_desc);
5702 - rxdr->size = ALIGN(rxdr->size, 4096);
5703 + rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
5704 + rx_ring->size = ALIGN(rx_ring->size, 4096);
5705
5706 - rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
5707 + rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, &rx_ring->dma);
5708
5709 - if (!rxdr->desc) {
5710 + if (!rx_ring->desc) {
5711 DPRINTK(PROBE, ERR,
5712 - "Memory allocation failed for the rx desc ring\n");
5713 - vfree(rxdr->rx_buffer_info);
5714 + "Memory allocation failed for the rx desc ring\n");
5715 + vfree(rx_ring->rx_buffer_info);
5716 goto alloc_failed;
5717 }
5718
5719 - rxdr->next_to_clean = 0;
5720 - rxdr->next_to_use = 0;
5721 + rx_ring->next_to_clean = 0;
5722 + rx_ring->next_to_use = 0;
5723
5724 return 0;
5725
5726 alloc_failed:
5727 - vfree(rxdr->lro_mgr.lro_arr);
5728 - rxdr->lro_mgr.lro_arr = NULL;
5729 +#ifdef CONFIG_IXGBE_LRO
5730 + vfree(rx_ring->lro_mgr.lro_arr);
5731 + rx_ring->lro_mgr.lro_arr = NULL;
5732 +#endif
5733 return -ENOMEM;
5734 }
5735
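Both resource-setup paths size the descriptor ring as count times descriptor size and then round up to a 4 KiB boundary with ALIGN before the coherent DMA allocation. The rounding in isolation, with ALIGN reimplemented for the example and a 16-byte descriptor assumed:

    #include <assert.h>
    #include <stddef.h>

    /* Power-of-two round-up, same shape as the kernel's ALIGN macro. */
    #define SKETCH_ALIGN(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

    int main(void)
    {
            size_t desc_size = 16;                      /* assumed descriptor size */
            size_t count = 512;                         /* example ring count */
            size_t ring = SKETCH_ALIGN(count * desc_size, 4096);

            assert(ring == 8192);                       /* 512 * 16 is already 4K-aligned */
            assert(SKETCH_ALIGN(8200, 4096) == 12288);  /* anything over rounds up to the next 4K */
            return 0;
    }
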
5736 /**
5737 + * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
5738 + * @adapter: board private structure
5739 + * caller's duty to clean those orphaned rings.
5740 + * If this function returns with an error, then it's possible one or
5741 + * more of the rings is populated (while the rest are not). It is the
5742 + * callers duty to clean those orphaned rings.
5743 + *
5744 + * Return 0 on success, negative on failure
5745 + **/
5746 +
5747 +static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
5748 +{
5749 + int i, err = 0;
5750 +
5751 + for (i = 0; i < adapter->num_rx_queues; i++) {
5752 + err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
5753 + if (!err)
5754 + continue;
5755 + DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i);
5756 + break;
5757 + }
5758 +
5759 + return err;
5760 +}
5761 +
5762 +/**
5763 * ixgbe_free_tx_resources - Free Tx Resources per Queue
5764 * @adapter: board private structure
5765 * @tx_ring: Tx descriptor ring for a specific queue
5766 *
5767 * Free all transmit software resources
5768 **/
5769 -static void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
5770 - struct ixgbe_ring *tx_ring)
5771 +void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
5772 + struct ixgbe_ring *tx_ring)
5773 {
5774 struct pci_dev *pdev = adapter->pdev;
5775
5776 @@ -2620,13 +2795,15 @@ static void ixgbe_free_all_tx_resources(
5777 *
5778 * Free all receive software resources
5779 **/
5780 -static void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
5781 - struct ixgbe_ring *rx_ring)
5782 +void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
5783 + struct ixgbe_ring *rx_ring)
5784 {
5785 struct pci_dev *pdev = adapter->pdev;
5786
5787 +#ifdef CONFIG_IXGBE_LRO
5788 vfree(rx_ring->lro_mgr.lro_arr);
5789 rx_ring->lro_mgr.lro_arr = NULL;
5790 +#endif
5791
5792 ixgbe_clean_rx_ring(adapter, rx_ring);
5793
5794 @@ -2653,59 +2830,6 @@ static void ixgbe_free_all_rx_resources(
5795 }
5796
5797 /**
5798 - * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
5799 - * @adapter: board private structure
5800 - *
5801 - * If this function returns with an error, then it's possible one or
5802 - * more of the rings is populated (while the rest are not). It is the
5803 - * callers duty to clean those orphaned rings.
5804 - *
5805 - * Return 0 on success, negative on failure
5806 - **/
5807 -static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
5808 -{
5809 - int i, err = 0;
5810 -
5811 - for (i = 0; i < adapter->num_tx_queues; i++) {
5812 - err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
5813 - if (err) {
5814 - DPRINTK(PROBE, ERR,
5815 - "Allocation for Tx Queue %u failed\n", i);
5816 - break;
5817 - }
5818 - }
5819 -
5820 - return err;
5821 -}
5822 -
5823 -/**
5824 - * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
5825 - * @adapter: board private structure
5826 - *
5827 - * If this function returns with an error, then it's possible one or
5828 - * more of the rings is populated (while the rest are not). It is the
5829 - * callers duty to clean those orphaned rings.
5830 - *
5831 - * Return 0 on success, negative on failure
5832 - **/
5833 -
5834 -static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
5835 -{
5836 - int i, err = 0;
5837 -
5838 - for (i = 0; i < adapter->num_rx_queues; i++) {
5839 - err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
5840 - if (err) {
5841 - DPRINTK(PROBE, ERR,
5842 - "Allocation for Rx Queue %u failed\n", i);
5843 - break;
5844 - }
5845 - }
5846 -
5847 - return err;
5848 -}
5849 -
5850 -/**
5851 * ixgbe_change_mtu - Change the Maximum Transfer Unit
5852 * @netdev: network interface device structure
5853 * @new_mtu: new value for maximum frame size
5854 @@ -2717,12 +2841,12 @@ static int ixgbe_change_mtu(struct net_d
5855 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5856 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
5857
5858 - if ((max_frame < (ETH_ZLEN + ETH_FCS_LEN)) ||
5859 - (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
5860 + /* MTU < 68 is an error and causes problems on some kernels */
5861 + if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
5862 return -EINVAL;
5863
5864 DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n",
5865 - netdev->mtu, new_mtu);
5866 + netdev->mtu, new_mtu);
5867 /* must set new MTU before calling down or up */
5868 netdev->mtu = new_mtu;
5869
5870 @@ -2817,6 +2941,135 @@ static int ixgbe_close(struct net_device
5871 }
5872
5873 /**
5874 + * ixgbe_napi_add_all - prep napi structs for use
5875 + * @adapter: private struct
5876 + * helper function to napi_add each possible q_vector->napi
5877 + */
5878 +static void ixgbe_napi_add_all(struct ixgbe_adapter *adapter)
5879 +{
5880 + int q_idx, q_vectors;
5881 + int (*poll)(struct napi_struct *, int);
5882 +
5883 + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
5884 + poll = &ixgbe_clean_rxonly;
5885 + /* Only enable as many vectors as we have rx queues. */
5886 + q_vectors = adapter->num_rx_queues;
5887 + } else {
5888 + poll = &ixgbe_poll;
5889 + /* only one q_vector for legacy modes */
5890 + q_vectors = 1;
5891 + }
5892 +
5893 + for (q_idx = 0; q_idx < q_vectors; q_idx++) {
5894 + struct ixgbe_q_vector *q_vector = &adapter->q_vector[q_idx];
5895 + netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64);
5896 + }
5897 +}
5898 +
5899 +static void ixgbe_napi_del_all(struct ixgbe_adapter *adapter)
5900 +{
5901 + int q_idx;
5902 + int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
5903 +
5904 + /* legacy and MSI only use one vector */
5905 + if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
5906 + q_vectors = 1;
5907 +
5908 + for (q_idx = 0; q_idx < q_vectors; q_idx++) {
5909 + struct ixgbe_q_vector *q_vector = &adapter->q_vector[q_idx];
5910 + if (!q_vector->rxr_count)
5911 + continue;
5912 + netif_napi_del(&q_vector->napi);
5913 + }
5914 +}
5915 +
5916 +#ifdef CONFIG_PM
5917 +static int ixgbe_resume(struct pci_dev *pdev)
5918 +{
5919 + struct net_device *netdev = pci_get_drvdata(pdev);
5920 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
5921 + u32 err;
5922 +
5923 + pci_set_power_state(pdev, PCI_D0);
5924 + pci_restore_state(pdev);
5925 + err = pci_enable_device(pdev);
5926 + if (err) {
5927 + printk(KERN_ERR "ixgbe: Cannot enable PCI device from "
5928 + "suspend\n");
5929 + return err;
5930 + }
5931 + pci_set_master(pdev);
5932 +
5933 + pci_enable_wake(pdev, PCI_D3hot, 0);
5934 + pci_enable_wake(pdev, PCI_D3cold, 0);
5935 +
5936 + err = ixgbe_init_interrupt_scheme(adapter);
5937 + if (err) {
5938 + printk(KERN_ERR "ixgbe: Cannot initialize interrupts for "
5939 + "device\n");
5940 + return err;
5941 + }
5942 +
5943 + ixgbe_napi_add_all(adapter);
5944 + ixgbe_reset(adapter);
5945 +
5946 + if (netif_running(netdev)) {
5947 + err = ixgbe_open(adapter->netdev);
5948 + if (err)
5949 + return err;
5950 + }
5951 +
5952 + netif_device_attach(netdev);
5953 +
5954 + return 0;
5955 +}
5956 +
5957 +#endif /* CONFIG_PM */
5958 +static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
5959 +{
5960 + struct net_device *netdev = pci_get_drvdata(pdev);
5961 + struct ixgbe_adapter *adapter = netdev_priv(netdev);
5962 +#ifdef CONFIG_PM
5963 + int retval = 0;
5964 +#endif
5965 +
5966 + netif_device_detach(netdev);
5967 +
5968 + if (netif_running(netdev)) {
5969 + ixgbe_down(adapter);
5970 + ixgbe_free_irq(adapter);
5971 + ixgbe_free_all_tx_resources(adapter);
5972 + ixgbe_free_all_rx_resources(adapter);
5973 + }
5974 + ixgbe_reset_interrupt_capability(adapter);
5975 + ixgbe_napi_del_all(adapter);
5976 + kfree(adapter->tx_ring);
5977 + kfree(adapter->rx_ring);
5978 +
5979 +#ifdef CONFIG_PM
5980 + retval = pci_save_state(pdev);
5981 + if (retval)
5982 + return retval;
5983 +#endif
5984 +
5985 + pci_enable_wake(pdev, PCI_D3hot, 0);
5986 + pci_enable_wake(pdev, PCI_D3cold, 0);
5987 +
5988 + ixgbe_release_hw_control(adapter);
5989 +
5990 + pci_disable_device(pdev);
5991 +
5992 + pci_set_power_state(pdev, pci_choose_state(pdev, state));
5993 +
5994 + return 0;
5995 +}
5996 +
5997 +static void ixgbe_shutdown(struct pci_dev *pdev)
5998 +{
5999 + ixgbe_suspend(pdev, PMSG_SUSPEND);
6000 +}
6001 +
6002 +/**
6003 * ixgbe_update_stats - Update the board statistics counters.
6004 * @adapter: board private structure
6005 **/
6006 @@ -2889,7 +3142,7 @@ void ixgbe_update_stats(struct ixgbe_ada
6007
6008 /* Rx Errors */
6009 adapter->net_stats.rx_errors = adapter->stats.crcerrs +
6010 - adapter->stats.rlec;
6011 + adapter->stats.rlec;
6012 adapter->net_stats.rx_dropped = 0;
6013 adapter->net_stats.rx_length_errors = adapter->stats.rlec;
6014 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
6015 @@ -2903,27 +3156,74 @@ void ixgbe_update_stats(struct ixgbe_ada
6016 static void ixgbe_watchdog(unsigned long data)
6017 {
6018 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
6019 + struct ixgbe_hw *hw = &adapter->hw;
6020 +
6021 + /* Do the watchdog outside of interrupt context due to the lovely
6022 + * delays that some of the newer hardware requires */
6023 + if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
6024 + /* Cause software interrupt to ensure rx rings are cleaned */
6025 + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
6026 + u32 eics =
6027 + (1 << (adapter->num_msix_vectors - NON_Q_VECTORS)) - 1;
6028 + IXGBE_WRITE_REG(hw, IXGBE_EICS, eics);
6029 + } else {
6030 +		/* For legacy and MSI interrupts, don't set any bits that
6031 + * are enabled for EIAM, because this operation would
6032 + * set *both* EIMS and EICS for any bit in EIAM */
6033 + IXGBE_WRITE_REG(hw, IXGBE_EICS,
6034 + (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
6035 + }
6036 + /* Reset the timer */
6037 + mod_timer(&adapter->watchdog_timer,
6038 + round_jiffies(jiffies + 2 * HZ));
6039 + }
6040 +
6041 + schedule_work(&adapter->watchdog_task);
6042 +}
6043 +
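In the MSI-X branch of the watchdog above, the EICS value `(1 << n) - 1` is a mask with the lowest n bits set, one per queue vector, so a single write kicks a software interrupt on every queue. The identity in isolation:

    #include <assert.h>
    #include <stdint.h>

    /* Lowest-n-bits mask, as used to kick every queue interrupt at once. */
    static uint32_t low_bits(unsigned n)
    {
            return ((uint32_t)1 << n) - 1;
    }

    int main(void)
    {
            assert(low_bits(1) == 0x1);      /* single queue vector */
            assert(low_bits(4) == 0xF);      /* four queue vectors -> bits 0..3 */
            assert(low_bits(16) == 0xFFFF);
            return 0;
    }
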
6044 +/**
6045 + * ixgbe_watchdog_task - worker thread to bring link up
6046 + * @work: pointer to work_struct containing our data
6047 + **/
6048 +static void ixgbe_watchdog_task(struct work_struct *work)
6049 +{
6050 + struct ixgbe_adapter *adapter = container_of(work,
6051 + struct ixgbe_adapter,
6052 + watchdog_task);
6053 struct net_device *netdev = adapter->netdev;
6054 - bool link_up;
6055 - u32 link_speed = 0;
6056 + struct ixgbe_hw *hw = &adapter->hw;
6057 + u32 link_speed = adapter->link_speed;
6058 + bool link_up = adapter->link_up;
6059
6060 - adapter->hw.mac.ops.check_link(&adapter->hw, &(link_speed), &link_up);
6061 + adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
6062 +
6063 + if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
6064 + hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
6065 + if (link_up ||
6066 + time_after(jiffies, (adapter->link_check_timeout +
6067 + IXGBE_TRY_LINK_TIMEOUT))) {
6068 + IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
6069 + adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
6070 + }
6071 + adapter->link_up = link_up;
6072 + adapter->link_speed = link_speed;
6073 + }
6074
6075 if (link_up) {
6076 if (!netif_carrier_ok(netdev)) {
6077 - u32 frctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
6078 - u32 rmcs = IXGBE_READ_REG(&adapter->hw, IXGBE_RMCS);
6079 + u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
6080 + u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
6081 #define FLOW_RX (frctl & IXGBE_FCTRL_RFCE)
6082 #define FLOW_TX (rmcs & IXGBE_RMCS_TFCE_802_3X)
6083 DPRINTK(LINK, INFO, "NIC Link is Up %s, "
6084 - "Flow Control: %s\n",
6085 - (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
6086 - "10 Gbps" :
6087 - (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
6088 - "1 Gbps" : "unknown speed")),
6089 - ((FLOW_RX && FLOW_TX) ? "RX/TX" :
6090 - (FLOW_RX ? "RX" :
6091 - (FLOW_TX ? "TX" : "None"))));
6092 + "Flow Control: %s\n",
6093 + (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
6094 + "10 Gbps" :
6095 + (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
6096 + "1 Gbps" : "unknown speed")),
6097 + ((FLOW_RX && FLOW_TX) ? "RX/TX" :
6098 + (FLOW_RX ? "RX" :
6099 + (FLOW_TX ? "TX" : "None"))));
6100
6101 netif_carrier_on(netdev);
6102 netif_tx_wake_all_queues(netdev);
6103 @@ -2932,6 +3232,8 @@ static void ixgbe_watchdog(unsigned long
6104 adapter->detect_tx_hung = true;
6105 }
6106 } else {
6107 + adapter->link_up = false;
6108 + adapter->link_speed = 0;
6109 if (netif_carrier_ok(netdev)) {
6110 DPRINTK(LINK, INFO, "NIC Link is Down\n");
6111 netif_carrier_off(netdev);
6112 @@ -2940,36 +3242,19 @@ static void ixgbe_watchdog(unsigned long
6113 }
6114
6115 ixgbe_update_stats(adapter);
6116 -
6117 - if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
6118 - /* Cause software interrupt to ensure rx rings are cleaned */
6119 - if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
6120 - u32 eics =
6121 - (1 << (adapter->num_msix_vectors - NON_Q_VECTORS)) - 1;
6122 - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, eics);
6123 - } else {
6124 - /* for legacy and MSI interrupts don't set any bits that
6125 - * are enabled for EIAM, because this operation would
6126 - * set *both* EIMS and EICS for any bit in EIAM */
6127 - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
6128 - (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
6129 - }
6130 - /* Reset the timer */
6131 - mod_timer(&adapter->watchdog_timer,
6132 - round_jiffies(jiffies + 2 * HZ));
6133 - }
6134 + adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
6135 }
6136
6137 static int ixgbe_tso(struct ixgbe_adapter *adapter,
6138 - struct ixgbe_ring *tx_ring, struct sk_buff *skb,
6139 - u32 tx_flags, u8 *hdr_len)
6140 + struct ixgbe_ring *tx_ring, struct sk_buff *skb,
6141 + u32 tx_flags, u8 *hdr_len)
6142 {
6143 struct ixgbe_adv_tx_context_desc *context_desc;
6144 unsigned int i;
6145 int err;
6146 struct ixgbe_tx_buffer *tx_buffer_info;
6147 - u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
6148 - u32 mss_l4len_idx = 0, l4len;
6149 + u32 vlan_macip_lens = 0, type_tucmd_mlhl;
6150 + u32 mss_l4len_idx, l4len;
6151
6152 if (skb_is_gso(skb)) {
6153 if (skb_header_cloned(skb)) {
6154 @@ -2985,16 +3270,16 @@ static int ixgbe_tso(struct ixgbe_adapte
6155 iph->tot_len = 0;
6156 iph->check = 0;
6157 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6158 - iph->daddr, 0,
6159 - IPPROTO_TCP,
6160 - 0);
6161 + iph->daddr, 0,
6162 + IPPROTO_TCP,
6163 + 0);
6164 adapter->hw_tso_ctxt++;
6165 } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
6166 ipv6_hdr(skb)->payload_len = 0;
6167 tcp_hdr(skb)->check =
6168 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
6169 - &ipv6_hdr(skb)->daddr,
6170 - 0, IPPROTO_TCP, 0);
6171 + &ipv6_hdr(skb)->daddr,
6172 + 0, IPPROTO_TCP, 0);
6173 adapter->hw_tso6_ctxt++;
6174 }
6175
6176 @@ -3008,7 +3293,7 @@ static int ixgbe_tso(struct ixgbe_adapte
6177 vlan_macip_lens |=
6178 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
6179 vlan_macip_lens |= ((skb_network_offset(skb)) <<
6180 - IXGBE_ADVTXD_MACLEN_SHIFT);
6181 + IXGBE_ADVTXD_MACLEN_SHIFT);
6182 *hdr_len += skb_network_offset(skb);
6183 vlan_macip_lens |=
6184 (skb_transport_header(skb) - skb_network_header(skb));
6185 @@ -3018,8 +3303,8 @@ static int ixgbe_tso(struct ixgbe_adapte
6186 context_desc->seqnum_seed = 0;
6187
6188 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
6189 - type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
6190 - IXGBE_ADVTXD_DTYP_CTXT);
6191 + type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
6192 + IXGBE_ADVTXD_DTYP_CTXT);
6193
6194 if (skb->protocol == htons(ETH_P_IP))
6195 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
6196 @@ -3027,9 +3312,11 @@ static int ixgbe_tso(struct ixgbe_adapte
6197 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
6198
6199 /* MSS L4LEN IDX */
6200 - mss_l4len_idx |=
6201 + mss_l4len_idx =
6202 (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
6203 mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
6204 + /* use index 1 for TSO */
6205 + mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
6206 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
6207
6208 tx_buffer_info->time_stamp = jiffies;
6209 @@ -3046,8 +3333,8 @@ static int ixgbe_tso(struct ixgbe_adapte
6210 }
6211
6212 static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
6213 - struct ixgbe_ring *tx_ring,
6214 - struct sk_buff *skb, u32 tx_flags)
6215 + struct ixgbe_ring *tx_ring,
6216 + struct sk_buff *skb, u32 tx_flags)
6217 {
6218 struct ixgbe_adv_tx_context_desc *context_desc;
6219 unsigned int i;
6220 @@ -3064,16 +3351,16 @@ static bool ixgbe_tx_csum(struct ixgbe_a
6221 vlan_macip_lens |=
6222 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
6223 vlan_macip_lens |= (skb_network_offset(skb) <<
6224 - IXGBE_ADVTXD_MACLEN_SHIFT);
6225 + IXGBE_ADVTXD_MACLEN_SHIFT);
6226 if (skb->ip_summed == CHECKSUM_PARTIAL)
6227 vlan_macip_lens |= (skb_transport_header(skb) -
6228 - skb_network_header(skb));
6229 + skb_network_header(skb));
6230
6231 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
6232 context_desc->seqnum_seed = 0;
6233
6234 type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
6235 - IXGBE_ADVTXD_DTYP_CTXT);
6236 + IXGBE_ADVTXD_DTYP_CTXT);
6237
6238 if (skb->ip_summed == CHECKSUM_PARTIAL) {
6239 switch (skb->protocol) {
6240 @@ -3081,16 +3368,14 @@ static bool ixgbe_tx_csum(struct ixgbe_a
6241 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
6242 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
6243 type_tucmd_mlhl |=
6244 - IXGBE_ADVTXD_TUCMD_L4T_TCP;
6245 + IXGBE_ADVTXD_TUCMD_L4T_TCP;
6246 break;
6247 -
6248 case __constant_htons(ETH_P_IPV6):
6249 /* XXX what about other V6 headers?? */
6250 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
6251 type_tucmd_mlhl |=
6252 - IXGBE_ADVTXD_TUCMD_L4T_TCP;
6253 + IXGBE_ADVTXD_TUCMD_L4T_TCP;
6254 break;
6255 -
6256 default:
6257 if (unlikely(net_ratelimit())) {
6258 DPRINTK(PROBE, WARNING,
6259 @@ -3102,10 +3387,12 @@ static bool ixgbe_tx_csum(struct ixgbe_a
6260 }
6261
6262 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
6263 + /* use index zero for tx checksum offload */
6264 context_desc->mss_l4len_idx = 0;
6265
6266 tx_buffer_info->time_stamp = jiffies;
6267 tx_buffer_info->next_to_watch = i;
6268 +
6269 adapter->hw_csum_tx_good++;
6270 i++;
6271 if (i == tx_ring->count)
6272 @@ -3114,12 +3401,13 @@ static bool ixgbe_tx_csum(struct ixgbe_a
6273
6274 return true;
6275 }
6276 +
6277 return false;
6278 }
6279
6280 static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
6281 - struct ixgbe_ring *tx_ring,
6282 - struct sk_buff *skb, unsigned int first)
6283 + struct ixgbe_ring *tx_ring,
6284 + struct sk_buff *skb, unsigned int first)
6285 {
6286 struct ixgbe_tx_buffer *tx_buffer_info;
6287 unsigned int len = skb->len;
6288 @@ -3137,8 +3425,8 @@ static int ixgbe_tx_map(struct ixgbe_ada
6289
6290 tx_buffer_info->length = size;
6291 tx_buffer_info->dma = pci_map_single(adapter->pdev,
6292 - skb->data + offset,
6293 - size, PCI_DMA_TODEVICE);
6294 + skb->data + offset,
6295 + size, PCI_DMA_TODEVICE);
6296 tx_buffer_info->time_stamp = jiffies;
6297 tx_buffer_info->next_to_watch = i;
6298
6299 @@ -3163,9 +3451,10 @@ static int ixgbe_tx_map(struct ixgbe_ada
6300
6301 tx_buffer_info->length = size;
6302 tx_buffer_info->dma = pci_map_page(adapter->pdev,
6303 - frag->page,
6304 - offset,
6305 - size, PCI_DMA_TODEVICE);
6306 + frag->page,
6307 + offset,
6308 + size,
6309 + PCI_DMA_TODEVICE);
6310 tx_buffer_info->time_stamp = jiffies;
6311 tx_buffer_info->next_to_watch = i;
6312
6313 @@ -3188,8 +3477,8 @@ static int ixgbe_tx_map(struct ixgbe_ada
6314 }
6315
6316 static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
6317 - struct ixgbe_ring *tx_ring,
6318 - int tx_flags, int count, u32 paylen, u8 hdr_len)
6319 + struct ixgbe_ring *tx_ring,
6320 + int tx_flags, int count, u32 paylen, u8 hdr_len)
6321 {
6322 union ixgbe_adv_tx_desc *tx_desc = NULL;
6323 struct ixgbe_tx_buffer *tx_buffer_info;
6324 @@ -3208,15 +3497,17 @@ static void ixgbe_tx_queue(struct ixgbe_
6325 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
6326
6327 olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
6328 - IXGBE_ADVTXD_POPTS_SHIFT;
6329 + IXGBE_ADVTXD_POPTS_SHIFT;
6330
6331 + /* use index 1 context for tso */
6332 + olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
6333 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
6334 olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
6335 - IXGBE_ADVTXD_POPTS_SHIFT;
6336 + IXGBE_ADVTXD_POPTS_SHIFT;
6337
6338 } else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
6339 olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
6340 - IXGBE_ADVTXD_POPTS_SHIFT;
6341 + IXGBE_ADVTXD_POPTS_SHIFT;
6342
6343 olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
6344
6345 @@ -3226,9 +3517,8 @@ static void ixgbe_tx_queue(struct ixgbe_
6346 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
6347 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
6348 tx_desc->read.cmd_type_len =
6349 - cpu_to_le32(cmd_type_len | tx_buffer_info->length);
6350 + cpu_to_le32(cmd_type_len | tx_buffer_info->length);
6351 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
6352 -
6353 i++;
6354 if (i == tx_ring->count)
6355 i = 0;
6356 @@ -3249,7 +3539,7 @@ static void ixgbe_tx_queue(struct ixgbe_
6357 }
6358
6359 static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
6360 - struct ixgbe_ring *tx_ring, int size)
6361 + struct ixgbe_ring *tx_ring, int size)
6362 {
6363 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6364
6365 @@ -3265,61 +3555,52 @@ static int __ixgbe_maybe_stop_tx(struct
6366 return -EBUSY;
6367
6368 /* A reprieve! - use start_queue because it doesn't call schedule */
6369 - netif_wake_subqueue(netdev, tx_ring->queue_index);
6370 + netif_start_subqueue(netdev, tx_ring->queue_index);
6371 ++adapter->restart_queue;
6372 return 0;
6373 }
6374
6375 static int ixgbe_maybe_stop_tx(struct net_device *netdev,
6376 - struct ixgbe_ring *tx_ring, int size)
6377 + struct ixgbe_ring *tx_ring, int size)
6378 {
6379 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
6380 return 0;
6381 return __ixgbe_maybe_stop_tx(netdev, tx_ring, size);
6382 }
6383
6384 -
6385 static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
6386 {
6387 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6388 struct ixgbe_ring *tx_ring;
6389 - unsigned int len = skb->len;
6390 unsigned int first;
6391 unsigned int tx_flags = 0;
6392 u8 hdr_len = 0;
6393 int r_idx = 0, tso;
6394 - unsigned int mss = 0;
6395 int count = 0;
6396 unsigned int f;
6397 - unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
6398 - len -= skb->data_len;
6399 +
6400 r_idx = (adapter->num_tx_queues - 1) & skb->queue_mapping;
6401 tx_ring = &adapter->tx_ring[r_idx];
6402
6403 -
6404 - if (skb->len <= 0) {
6405 - dev_kfree_skb(skb);
6406 - return NETDEV_TX_OK;
6407 + if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
6408 + tx_flags |= vlan_tx_tag_get(skb);
6409 + tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
6410 + tx_flags |= IXGBE_TX_FLAGS_VLAN;
6411 }
6412 - mss = skb_shinfo(skb)->gso_size;
6413 -
6414 - if (mss)
6415 - count++;
6416 - else if (skb->ip_summed == CHECKSUM_PARTIAL)
6417 + /* three things can cause us to need a context descriptor */
6418 + if (skb_is_gso(skb) ||
6419 + (skb->ip_summed == CHECKSUM_PARTIAL) ||
6420 + (tx_flags & IXGBE_TX_FLAGS_VLAN))
6421 count++;
6422
6423 - count += TXD_USE_COUNT(len);
6424 - for (f = 0; f < nr_frags; f++)
6425 + count += TXD_USE_COUNT(skb_headlen(skb));
6426 + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
6427 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
6428
6429 if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
6430 adapter->tx_busy++;
6431 return NETDEV_TX_BUSY;
6432 }
6433 - if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
6434 - tx_flags |= IXGBE_TX_FLAGS_VLAN;
6435 - tx_flags |= (vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT);
6436 - }
6437
6438 if (skb->protocol == htons(ETH_P_IP))
6439 tx_flags |= IXGBE_TX_FLAGS_IPV4;
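
The counting logic above is the descriptor budget for one frame: one optional context descriptor (needed when TSO, checksum offload, or a VLAN tag is in play) plus TXD_USE_COUNT() for the linear head and every page fragment. A minimal user-space sketch of the same budgeting, with stand-in types and an assumed per-descriptor limit (MAX_DATA_PER_TXD and the frame struct are illustrative, not the driver's):

    #include <stdbool.h>
    #include <stddef.h>

    #define MAX_DATA_PER_TXD 16384  /* assumed per-descriptor byte limit */
    /* descriptors needed to cover s bytes, rounded up */
    #define TXD_USE_COUNT(s) (((s) + MAX_DATA_PER_TXD - 1) / MAX_DATA_PER_TXD)

    struct frag { size_t size; };
    struct frame {                          /* stand-in for struct sk_buff */
            size_t headlen;                 /* linear data, as skb_headlen() */
            unsigned int nr_frags;
            struct frag frags[16];
            bool gso, csum_partial, vlan_tagged;
    };

    /* worst-case descriptor count, mirroring the logic in ixgbe_xmit_frame() */
    static unsigned int tx_desc_budget(const struct frame *f)
    {
            unsigned int count = 0, i;

            /* any of the three conditions forces one context descriptor */
            if (f->gso || f->csum_partial || f->vlan_tagged)
                    count++;

            count += TXD_USE_COUNT(f->headlen);
            for (i = 0; i < f->nr_frags; i++)
                    count += TXD_USE_COUNT(f->frags[i].size);
            return count;
    }

ixgbe_maybe_stop_tx() compares this worst case against IXGBE_DESC_UNUSED(tx_ring) and parks the subqueue before the ring can overflow, which is why the estimate errs high rather than exact.
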
6440 @@ -3333,12 +3614,12 @@ static int ixgbe_xmit_frame(struct sk_bu
6441 if (tso)
6442 tx_flags |= IXGBE_TX_FLAGS_TSO;
6443 else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
6444 - (skb->ip_summed == CHECKSUM_PARTIAL))
6445 + (skb->ip_summed == CHECKSUM_PARTIAL))
6446 tx_flags |= IXGBE_TX_FLAGS_CSUM;
6447
6448 ixgbe_tx_queue(adapter, tx_ring, tx_flags,
6449 - ixgbe_tx_map(adapter, tx_ring, skb, first),
6450 - skb->len, hdr_len);
6451 + ixgbe_tx_map(adapter, tx_ring, skb, first),
6452 + skb->len, hdr_len);
6453
6454 netdev->trans_start = jiffies;
6455
6456 @@ -3372,15 +3653,16 @@ static struct net_device_stats *ixgbe_ge
6457 static int ixgbe_set_mac(struct net_device *netdev, void *p)
6458 {
6459 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6460 + struct ixgbe_hw *hw = &adapter->hw;
6461 struct sockaddr *addr = p;
6462
6463 if (!is_valid_ether_addr(addr->sa_data))
6464 return -EADDRNOTAVAIL;
6465
6466 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
6467 - memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
6468 + memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
6469
6470 - ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
6471 + hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
6472
6473 return 0;
6474 }
6475 @@ -3404,28 +3686,19 @@ static void ixgbe_netpoll(struct net_dev
6476 #endif
6477
6478 /**
6479 - * ixgbe_napi_add_all - prep napi structs for use
6480 - * @adapter: private struct
6481 - * helper function to napi_add each possible q_vector->napi
6482 - */
6483 -static void ixgbe_napi_add_all(struct ixgbe_adapter *adapter)
6484 + * ixgbe_link_config - set up initial link with default speed and duplex
6485 + * @hw: pointer to private hardware struct
6486 + *
6487 + * Returns 0 on success, negative on failure
6488 + **/
6489 +static int ixgbe_link_config(struct ixgbe_hw *hw)
6490 {
6491 - int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
6492 - int (*poll)(struct napi_struct *, int);
6493 + u32 autoneg = IXGBE_LINK_SPEED_10GB_FULL;
6494
6495 - if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
6496 - poll = &ixgbe_clean_rxonly;
6497 - } else {
6498 - poll = &ixgbe_poll;
6499 - /* only one q_vector for legacy modes */
6500 - q_vectors = 1;
6501 - }
6502 + /* must always autoneg for both 1G and 10G link */
6503 + hw->mac.autoneg = true;
6504
6505 - for (i = 0; i < q_vectors; i++) {
6506 - struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
6507 - netif_napi_add(adapter->netdev, &q_vector->napi,
6508 - (*poll), 64);
6509 - }
6510 + return hw->mac.ops.setup_link_speed(hw, autoneg, true, true);
6511 }
6512
6513 /**
6514 @@ -3440,17 +3713,16 @@ static void ixgbe_napi_add_all(struct ix
6515 * and a hardware reset occur.
6516 **/
6517 static int __devinit ixgbe_probe(struct pci_dev *pdev,
6518 - const struct pci_device_id *ent)
6519 + const struct pci_device_id *ent)
6520 {
6521 struct net_device *netdev;
6522 struct ixgbe_adapter *adapter = NULL;
6523 struct ixgbe_hw *hw;
6524 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
6525 - unsigned long mmio_start, mmio_len;
6526 static int cards_found;
6527 int i, err, pci_using_dac;
6528 u16 link_status, link_speed, link_width;
6529 - u32 part_num;
6530 + u32 part_num, eec;
6531
6532 err = pci_enable_device(pdev);
6533 if (err)
6534 @@ -3465,7 +3737,7 @@ static int __devinit ixgbe_probe(struct
6535 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
6536 if (err) {
6537 dev_err(&pdev->dev, "No usable DMA "
6538 - "configuration, aborting\n");
6539 + "configuration, aborting\n");
6540 goto err_dma;
6541 }
6542 }
6543 @@ -3498,10 +3770,8 @@ static int __devinit ixgbe_probe(struct
6544 hw->back = adapter;
6545 adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
6546
6547 - mmio_start = pci_resource_start(pdev, 0);
6548 - mmio_len = pci_resource_len(pdev, 0);
6549 -
6550 - hw->hw_addr = ioremap(mmio_start, mmio_len);
6551 + hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
6552 + pci_resource_len(pdev, 0));
6553 if (!hw->hw_addr) {
6554 err = -EIO;
6555 goto err_ioremap;
6556 @@ -3516,7 +3786,8 @@ static int __devinit ixgbe_probe(struct
6557 netdev->stop = &ixgbe_close;
6558 netdev->hard_start_xmit = &ixgbe_xmit_frame;
6559 netdev->get_stats = &ixgbe_get_stats;
6560 - netdev->set_multicast_list = &ixgbe_set_multi;
6561 + netdev->set_rx_mode = &ixgbe_set_rx_mode;
6562 + netdev->set_multicast_list = &ixgbe_set_rx_mode;
6563 netdev->set_mac_address = &ixgbe_set_mac;
6564 netdev->change_mtu = &ixgbe_change_mtu;
6565 ixgbe_set_ethtool_ops(netdev);
6566 @@ -3530,22 +3801,23 @@ static int __devinit ixgbe_probe(struct
6567 #endif
6568 strcpy(netdev->name, pci_name(pdev));
6569
6570 - netdev->mem_start = mmio_start;
6571 - netdev->mem_end = mmio_start + mmio_len;
6572 -
6573 adapter->bd_number = cards_found;
6574
6575 - /* PCI config space info */
6576 - hw->vendor_id = pdev->vendor;
6577 - hw->device_id = pdev->device;
6578 - hw->revision_id = pdev->revision;
6579 - hw->subsystem_vendor_id = pdev->subsystem_vendor;
6580 - hw->subsystem_device_id = pdev->subsystem_device;
6581 -
6582 /* Setup hw api */
6583 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
6584 hw->mac.type = ii->mac;
6585
6586 + /* EEPROM */
6587 + memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
6588 + eec = IXGBE_READ_REG(hw, IXGBE_EEC);
6589 + /* If EEPROM is valid (bit 8 = 1), use default, otherwise use bit bang */
6590 + if (!(eec & (1 << 8)))
6591 + hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
6592 +
6593 + /* PHY */
6594 + memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
6595 + /* phy->sfp_type = ixgbe_sfp_type_unknown; */
6596 +
6597 err = ii->get_invariants(hw);
6598 if (err)
6599 goto err_hw_init;
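
The EEPROM block above also shows how a single slot in a freshly copied ops table can be overridden at probe time: if the EEC register reports no valid attached EEPROM (bit 8 clear), only the read method is repointed at the bit-bang fallback. The pattern in isolation (names and the no-op bodies are stand-ins):

    #include <string.h>

    struct eeprom_ops {
            int (*read)(void *hw, unsigned short offset, unsigned short *data);
    };

    /* fast path: hardware-assisted read (placeholder body) */
    static int read_default(void *hw, unsigned short off, unsigned short *d)
    { (void)hw; (void)off; *d = 0; return 0; }

    /* slow path: software bit-banged SPI access (placeholder body) */
    static int read_bit_bang(void *hw, unsigned short off, unsigned short *d)
    { (void)hw; (void)off; *d = 0; return 0; }

    static const struct eeprom_ops generic_eeprom_ops = { .read = read_default };

    static void eeprom_init(struct eeprom_ops *ops, unsigned int eec)
    {
            memcpy(ops, &generic_eeprom_ops, sizeof(*ops));
            /* EEC bit 8 set = valid EEPROM; clear = fall back to bit-banging */
            if (!(eec & (1u << 8)))
                    ops->read = read_bit_bang;
    }
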
6600 @@ -3555,26 +3827,36 @@ static int __devinit ixgbe_probe(struct
6601 if (err)
6602 goto err_sw_init;
6603
6604 + /* reset_hw fills in the perm_addr as well */
6605 + err = hw->mac.ops.reset_hw(hw);
6606 + if (err) {
6607 + dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err);
6608 + goto err_sw_init;
6609 + }
6610 +
6611 netdev->features = NETIF_F_SG |
6612 - NETIF_F_HW_CSUM |
6613 - NETIF_F_HW_VLAN_TX |
6614 - NETIF_F_HW_VLAN_RX |
6615 - NETIF_F_HW_VLAN_FILTER;
6616 + NETIF_F_IP_CSUM |
6617 + NETIF_F_HW_VLAN_TX |
6618 + NETIF_F_HW_VLAN_RX |
6619 + NETIF_F_HW_VLAN_FILTER;
6620
6621 - netdev->features |= NETIF_F_LRO;
6622 + netdev->features |= NETIF_F_IPV6_CSUM;
6623 netdev->features |= NETIF_F_TSO;
6624 netdev->features |= NETIF_F_TSO6;
6625 +#ifdef CONFIG_IXGBE_LRO
6626 + netdev->features |= NETIF_F_LRO;
6627 +#endif
6628
6629 netdev->vlan_features |= NETIF_F_TSO;
6630 netdev->vlan_features |= NETIF_F_TSO6;
6631 - netdev->vlan_features |= NETIF_F_HW_CSUM;
6632 + netdev->vlan_features |= NETIF_F_IP_CSUM;
6633 netdev->vlan_features |= NETIF_F_SG;
6634
6635 if (pci_using_dac)
6636 netdev->features |= NETIF_F_HIGHDMA;
6637
6638 /* make sure the EEPROM is good */
6639 - if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
6640 + if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
6641 dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
6642 err = -EIO;
6643 goto err_eeprom;
6644 @@ -3583,7 +3865,8 @@ static int __devinit ixgbe_probe(struct
6645 memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
6646 memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);
6647
6648 - if (ixgbe_validate_mac_addr(netdev->dev_addr)) {
6649 + if (ixgbe_validate_mac_addr(netdev->perm_addr)) {
6650 + dev_err(&pdev->dev, "invalid MAC address\n");
6651 err = -EIO;
6652 goto err_eeprom;
6653 }
6654 @@ -3593,13 +3876,7 @@ static int __devinit ixgbe_probe(struct
6655 adapter->watchdog_timer.data = (unsigned long)adapter;
6656
6657 INIT_WORK(&adapter->reset_task, ixgbe_reset_task);
6658 -
6659 - /* initialize default flow control settings */
6660 - hw->fc.original_type = ixgbe_fc_full;
6661 - hw->fc.type = ixgbe_fc_full;
6662 - hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
6663 - hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
6664 - hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
6665 + INIT_WORK(&adapter->watchdog_task, ixgbe_watchdog_task);
6666
6667 err = ixgbe_init_interrupt_scheme(adapter);
6668 if (err)
6669 @@ -3610,32 +3887,39 @@ static int __devinit ixgbe_probe(struct
6670 link_speed = link_status & IXGBE_PCI_LINK_SPEED;
6671 link_width = link_status & IXGBE_PCI_LINK_WIDTH;
6672 dev_info(&pdev->dev, "(PCI Express:%s:%s) "
6673 - "%02x:%02x:%02x:%02x:%02x:%02x\n",
6674 - ((link_speed == IXGBE_PCI_LINK_SPEED_5000) ? "5.0Gb/s" :
6675 - (link_speed == IXGBE_PCI_LINK_SPEED_2500) ? "2.5Gb/s" :
6676 - "Unknown"),
6677 - ((link_width == IXGBE_PCI_LINK_WIDTH_8) ? "Width x8" :
6678 - (link_width == IXGBE_PCI_LINK_WIDTH_4) ? "Width x4" :
6679 - (link_width == IXGBE_PCI_LINK_WIDTH_2) ? "Width x2" :
6680 - (link_width == IXGBE_PCI_LINK_WIDTH_1) ? "Width x1" :
6681 - "Unknown"),
6682 - netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
6683 - netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);
6684 - ixgbe_read_part_num(hw, &part_num);
6685 + "%02x:%02x:%02x:%02x:%02x:%02x\n",
6686 + ((link_speed == IXGBE_PCI_LINK_SPEED_5000) ? "5.0Gb/s" :
6687 + (link_speed == IXGBE_PCI_LINK_SPEED_2500) ? "2.5Gb/s" :
6688 + "Unknown"),
6689 + ((link_width == IXGBE_PCI_LINK_WIDTH_8) ? "Width x8" :
6690 + (link_width == IXGBE_PCI_LINK_WIDTH_4) ? "Width x4" :
6691 + (link_width == IXGBE_PCI_LINK_WIDTH_2) ? "Width x2" :
6692 + (link_width == IXGBE_PCI_LINK_WIDTH_1) ? "Width x1" :
6693 + "Unknown"),
6694 + netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
6695 + netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);
6696 + ixgbe_read_pba_num_generic(hw, &part_num);
6697 dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
6698 - hw->mac.type, hw->phy.type,
6699 - (part_num >> 8), (part_num & 0xff));
6700 + hw->mac.type, hw->phy.type,
6701 + (part_num >> 8), (part_num & 0xff));
6702
6703 if (link_width <= IXGBE_PCI_LINK_WIDTH_4) {
6704 dev_warn(&pdev->dev, "PCI-Express bandwidth available for "
6705 - "this card is not sufficient for optimal "
6706 - "performance.\n");
6707 + "this card is not sufficient for optimal "
6708 + "performance.\n");
6709 dev_warn(&pdev->dev, "For optimal performance a x8 "
6710 - "PCI-Express slot is required.\n");
6711 + "PCI-Express slot is required.\n");
6712 }
6713
6714 /* reset the hardware with the new settings */
6715 - ixgbe_start_hw(hw);
6716 + hw->mac.ops.start_hw(hw);
6717 +
6718 + /* link_config depends on start_hw being called at least once */
6719 + err = ixgbe_link_config(hw);
6720 + if (err) {
6721 + dev_err(&pdev->dev, "setup_link_speed FAILED %d\n", err);
6722 + goto err_register;
6723 + }
6724
6725 netif_carrier_off(netdev);
6726 netif_tx_stop_all_queues(netdev);
6727 @@ -3647,7 +3931,7 @@ static int __devinit ixgbe_probe(struct
6728 if (err)
6729 goto err_register;
6730
6731 -#ifdef CONFIG_DCA
6732 +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
6733 if (dca_add_requester(&pdev->dev) == 0) {
6734 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
6735 /* always use CB2 mode, difference is masked
6736 @@ -3697,7 +3981,7 @@ static void __devexit ixgbe_remove(struc
6737
6738 flush_scheduled_work();
6739
6740 -#ifdef CONFIG_DCA
6741 +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
6742 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
6743 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
6744 dca_remove_requester(&pdev->dev);
6745 @@ -3715,6 +3999,7 @@ static void __devexit ixgbe_remove(struc
6746 pci_release_regions(pdev);
6747
6748 DPRINTK(PROBE, INFO, "complete\n");
6749 + ixgbe_napi_del_all(adapter);
6750 kfree(adapter->tx_ring);
6751 kfree(adapter->rx_ring);
6752
6753 @@ -3732,7 +4017,7 @@ static void __devexit ixgbe_remove(struc
6754 * this device has been detected.
6755 */
6756 static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
6757 - pci_channel_state_t state)
6758 + pci_channel_state_t state)
6759 {
6760 struct net_device *netdev = pci_get_drvdata(pdev);
6761 struct ixgbe_adapter *adapter = netdev->priv;
6762 @@ -3743,7 +4028,7 @@ static pci_ers_result_t ixgbe_io_error_d
6763 ixgbe_down(adapter);
6764 pci_disable_device(pdev);
6765
6766 - /* Request a slot slot reset. */
6767 + /* Request a slot reset. */
6768 return PCI_ERS_RESULT_NEED_RESET;
6769 }
6770
6771 @@ -3760,7 +4045,7 @@ static pci_ers_result_t ixgbe_io_slot_re
6772
6773 if (pci_enable_device(pdev)) {
6774 DPRINTK(PROBE, ERR,
6775 - "Cannot re-enable PCI device after reset.\n");
6776 + "Cannot re-enable PCI device after reset.\n");
6777 return PCI_ERS_RESULT_DISCONNECT;
6778 }
6779 pci_set_master(pdev);
6780 @@ -3794,7 +4079,6 @@ static void ixgbe_io_resume(struct pci_d
6781 }
6782
6783 netif_device_attach(netdev);
6784 -
6785 }
6786
6787 static struct pci_error_handlers ixgbe_err_handler = {
6788 @@ -3830,13 +4114,14 @@ static int __init ixgbe_init_module(void
6789
6790 printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright);
6791
6792 -#ifdef CONFIG_DCA
6793 +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
6794 dca_register_notify(&dca_notifier);
6795
6796 #endif
6797 ret = pci_register_driver(&ixgbe_driver);
6798 return ret;
6799 }
6800 +
6801 module_init(ixgbe_init_module);
6802
6803 /**
6804 @@ -3847,24 +4132,24 @@ module_init(ixgbe_init_module);
6805 **/
6806 static void __exit ixgbe_exit_module(void)
6807 {
6808 -#ifdef CONFIG_DCA
6809 +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
6810 dca_unregister_notify(&dca_notifier);
6811 #endif
6812 pci_unregister_driver(&ixgbe_driver);
6813 }
6814
6815 -#ifdef CONFIG_DCA
6816 +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
6817 static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
6818 - void *p)
6819 + void *p)
6820 {
6821 int ret_val;
6822
6823 ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
6824 - __ixgbe_notify_dca);
6825 + __ixgbe_notify_dca);
6826
6827 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
6828 }
6829 -#endif /* CONFIG_DCA */
6830 +#endif /* CONFIG_DCA or CONFIG_DCA_MODULE */
6831
6832 module_exit(ixgbe_exit_module);
6833
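
One recurring mechanical change in ixgbe_main.c deserves a note: every #ifdef CONFIG_DCA becomes #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE). DCA is a tristate Kconfig option, and building it as a module defines only the _MODULE symbol, so the one-symbol test silently compiled the DCA hooks out. The corrected idiom, which later kernels wrap as IS_ENABLED():

    #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
    /* built when DCA is y (built-in) or m (module) */
    static void dca_hook(void)
    {
            /* register with the DCA core here */
    }
    #else
    /* DCA disabled entirely: keep callers happy with a no-op */
    static void dca_hook(void)
    {
    }
    #endif
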
6834 --- a/drivers/net/ixgbe/ixgbe_phy.c
6835 +++ b/drivers/net/ixgbe/ixgbe_phy.c
6836 @@ -1,7 +1,7 @@
6837 /*******************************************************************************
6838
6839 Intel 10 Gigabit PCI Express Linux driver
6840 - Copyright(c) 1999 - 2007 Intel Corporation.
6841 + Copyright(c) 1999 - 2008 Intel Corporation.
6842
6843 This program is free software; you can redistribute it and/or modify it
6844 under the terms and conditions of the GNU General Public License,
6845 @@ -20,7 +20,6 @@
6846 the file called "COPYING".
6847
6848 Contact Information:
6849 - Linux NICS <linux.nics@intel.com>
6850 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
6851 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
6852
6853 @@ -33,32 +32,36 @@
6854 #include "ixgbe_common.h"
6855 #include "ixgbe_phy.h"
6856
6857 +static bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr);
6858 static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
6859 static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
6860 -static bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr);
6861 -static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
6862 - u32 device_type, u16 phy_data);
6863
6864 /**
6865 - * ixgbe_identify_phy - Get physical layer module
6866 + * ixgbe_identify_phy_generic - Get physical layer module
6867 * @hw: pointer to hardware structure
6868 *
6869 * Determines the physical layer module found on the current adapter.
6870 **/
6871 -s32 ixgbe_identify_phy(struct ixgbe_hw *hw)
6872 +s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
6873 {
6874 s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
6875 u32 phy_addr;
6876
6877 - for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
6878 - if (ixgbe_validate_phy_addr(hw, phy_addr)) {
6879 - hw->phy.addr = phy_addr;
6880 - ixgbe_get_phy_id(hw);
6881 - hw->phy.type = ixgbe_get_phy_type_from_id(hw->phy.id);
6882 - status = 0;
6883 - break;
6884 + if (hw->phy.type == ixgbe_phy_unknown) {
6885 + for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
6886 + if (ixgbe_validate_phy_addr(hw, phy_addr)) {
6887 + hw->phy.addr = phy_addr;
6888 + ixgbe_get_phy_id(hw);
6889 + hw->phy.type =
6890 + ixgbe_get_phy_type_from_id(hw->phy.id);
6891 + status = 0;
6892 + break;
6893 + }
6894 }
6895 + } else {
6896 + status = 0;
6897 }
6898 +
6899 return status;
6900 }
6901
6902 @@ -73,10 +76,8 @@ static bool ixgbe_validate_phy_addr(stru
6903 bool valid = false;
6904
6905 hw->phy.addr = phy_addr;
6906 - ixgbe_read_phy_reg(hw,
6907 - IXGBE_MDIO_PHY_ID_HIGH,
6908 - IXGBE_MDIO_PMA_PMD_DEV_TYPE,
6909 - &phy_id);
6910 + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH,
6911 + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_id);
6912
6913 if (phy_id != 0xFFFF && phy_id != 0x0)
6914 valid = true;
6915 @@ -95,21 +96,18 @@ static s32 ixgbe_get_phy_id(struct ixgbe
6916 u16 phy_id_high = 0;
6917 u16 phy_id_low = 0;
6918
6919 - status = ixgbe_read_phy_reg(hw,
6920 - IXGBE_MDIO_PHY_ID_HIGH,
6921 - IXGBE_MDIO_PMA_PMD_DEV_TYPE,
6922 - &phy_id_high);
6923 + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH,
6924 + IXGBE_MDIO_PMA_PMD_DEV_TYPE,
6925 + &phy_id_high);
6926
6927 if (status == 0) {
6928 hw->phy.id = (u32)(phy_id_high << 16);
6929 - status = ixgbe_read_phy_reg(hw,
6930 - IXGBE_MDIO_PHY_ID_LOW,
6931 - IXGBE_MDIO_PMA_PMD_DEV_TYPE,
6932 - &phy_id_low);
6933 + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_LOW,
6934 + IXGBE_MDIO_PMA_PMD_DEV_TYPE,
6935 + &phy_id_low);
6936 hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK);
6937 hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK);
6938 }
6939 -
6940 return status;
6941 }
6942
6943 @@ -123,9 +121,6 @@ static enum ixgbe_phy_type ixgbe_get_phy
6944 enum ixgbe_phy_type phy_type;
6945
6946 switch (phy_id) {
6947 - case TN1010_PHY_ID:
6948 - phy_type = ixgbe_phy_tn;
6949 - break;
6950 case QT2022_PHY_ID:
6951 phy_type = ixgbe_phy_qt;
6952 break;
6953 @@ -138,32 +133,31 @@ static enum ixgbe_phy_type ixgbe_get_phy
6954 }
6955
6956 /**
6957 - * ixgbe_reset_phy - Performs a PHY reset
6958 + * ixgbe_reset_phy_generic - Performs a PHY reset
6959 * @hw: pointer to hardware structure
6960 **/
6961 -s32 ixgbe_reset_phy(struct ixgbe_hw *hw)
6962 +s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
6963 {
6964 /*
6965 * Perform soft PHY reset to the PHY_XS.
6966 * This will cause a soft reset to the PHY
6967 */
6968 - return ixgbe_write_phy_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
6969 - IXGBE_MDIO_PHY_XS_DEV_TYPE,
6970 - IXGBE_MDIO_PHY_XS_RESET);
6971 + return hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
6972 + IXGBE_MDIO_PHY_XS_DEV_TYPE,
6973 + IXGBE_MDIO_PHY_XS_RESET);
6974 }
6975
6976 /**
6977 - * ixgbe_read_phy_reg - Reads a value from a specified PHY register
6978 + * ixgbe_read_phy_reg_generic - Reads a value from a specified PHY register
6979 * @hw: pointer to hardware structure
6980 * @reg_addr: 32 bit address of PHY register to read
6981 * @phy_data: Pointer to read data from PHY register
6982 **/
6983 -s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
6984 - u32 device_type, u16 *phy_data)
6985 +s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
6986 + u32 device_type, u16 *phy_data)
6987 {
6988 u32 command;
6989 u32 i;
6990 - u32 timeout = 10;
6991 u32 data;
6992 s32 status = 0;
6993 u16 gssr;
6994 @@ -179,9 +173,9 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *
6995 if (status == 0) {
6996 /* Setup and write the address cycle command */
6997 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
6998 - (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
6999 - (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
7000 - (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
7001 + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
7002 + (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
7003 + (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
7004
7005 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
7006
7007 @@ -190,7 +184,7 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *
7008 * The MDI Command bit will clear when the operation is
7009 * complete
7010 */
7011 - for (i = 0; i < timeout; i++) {
7012 + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
7013 udelay(10);
7014
7015 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
7016 @@ -210,9 +204,9 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *
7017 * command
7018 */
7019 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
7020 - (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
7021 - (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
7022 - (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));
7023 + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
7024 + (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
7025 + (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));
7026
7027 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
7028
7029 @@ -221,7 +215,7 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *
7030 * completed. The MDI Command bit will clear when the
7031 * operation is complete
7032 */
7033 - for (i = 0; i < timeout; i++) {
7034 + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
7035 udelay(10);
7036
7037 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
7038 @@ -231,8 +225,7 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *
7039 }
7040
7041 if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
7042 - hw_dbg(hw,
7043 - "PHY read command didn't complete\n");
7044 + hw_dbg(hw, "PHY read command didn't complete\n");
7045 status = IXGBE_ERR_PHY;
7046 } else {
7047 /*
7048 @@ -247,22 +240,22 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *
7049
7050 ixgbe_release_swfw_sync(hw, gssr);
7051 }
7052 +
7053 return status;
7054 }
7055
7056 /**
7057 - * ixgbe_write_phy_reg - Writes a value to specified PHY register
7058 + * ixgbe_write_phy_reg_generic - Writes a value to specified PHY register
7059 * @hw: pointer to hardware structure
7060 * @reg_addr: 32 bit PHY register to write
7061 * @device_type: 5 bit device type
7062 * @phy_data: Data to write to the PHY register
7063 **/
7064 -static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
7065 - u32 device_type, u16 phy_data)
7066 +s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
7067 + u32 device_type, u16 phy_data)
7068 {
7069 u32 command;
7070 u32 i;
7071 - u32 timeout = 10;
7072 s32 status = 0;
7073 u16 gssr;
7074
7075 @@ -280,9 +273,9 @@ static s32 ixgbe_write_phy_reg(struct ix
7076
7077 /* Setup and write the address cycle command */
7078 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
7079 - (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
7080 - (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
7081 - (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
7082 + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
7083 + (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
7084 + (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
7085
7086 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
7087
7088 @@ -291,19 +284,19 @@ static s32 ixgbe_write_phy_reg(struct ix
7089 * The MDI Command bit will clear when the operation is
7090 * complete
7091 */
7092 - for (i = 0; i < timeout; i++) {
7093 + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
7094 udelay(10);
7095
7096 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
7097
7098 - if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) {
7099 - hw_dbg(hw, "PHY address cmd didn't complete\n");
7100 + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
7101 break;
7102 - }
7103 }
7104
7105 - if ((command & IXGBE_MSCA_MDI_COMMAND) != 0)
7106 + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
7107 + hw_dbg(hw, "PHY address cmd didn't complete\n");
7108 status = IXGBE_ERR_PHY;
7109 + }
7110
7111 if (status == 0) {
7112 /*
7113 @@ -311,9 +304,9 @@ static s32 ixgbe_write_phy_reg(struct ix
7114 * command
7115 */
7116 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
7117 - (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
7118 - (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
7119 - (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));
7120 + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
7121 + (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
7122 + (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));
7123
7124 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
7125
7126 @@ -322,20 +315,19 @@ static s32 ixgbe_write_phy_reg(struct ix
7127 * completed. The MDI Command bit will clear when the
7128 * operation is complete
7129 */
7130 - for (i = 0; i < timeout; i++) {
7131 + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
7132 udelay(10);
7133
7134 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
7135
7136 - if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) {
7137 - hw_dbg(hw, "PHY write command did not "
7138 - "complete.\n");
7139 + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
7140 break;
7141 - }
7142 }
7143
7144 - if ((command & IXGBE_MSCA_MDI_COMMAND) != 0)
7145 + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
7146 + hw_dbg(hw, "PHY write cmd didn't complete\n");
7147 status = IXGBE_ERR_PHY;
7148 + }
7149 }
7150
7151 ixgbe_release_swfw_sync(hw, gssr);
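
Both PHY accessors follow one MDIO clause-45 shape: write an address-cycle command to MSCA, poll until hardware clears the MDI command bit, then issue the read or write cycle and poll again, returning IXGBE_ERR_PHY if either poll times out. A condensed stand-alone model of that loop (the register and udelay() are stubs; the 100 x 10us budget mirrors IXGBE_MDIO_COMMAND_TIMEOUT):

    #include <stdint.h>

    #define MDI_COMMAND     (1u << 30)      /* stand-in for IXGBE_MSCA_MDI_COMMAND */
    #define CMD_TIMEOUT     100             /* iterations, 10us apart */

    static uint32_t msca;                   /* stand-in for the real MMIO register */
    static uint32_t reg_read(void)          { return msca; }
    static void     reg_write(uint32_t v)   { msca = v; }
    static void     udelay(unsigned int us) { (void)us; /* busy-wait in real code */ }

    /* issue one MDIO cycle and wait for the hardware to clear the command bit */
    static int mdio_cycle(uint32_t command)
    {
            unsigned int i;

            reg_write(command | MDI_COMMAND);
            for (i = 0; i < CMD_TIMEOUT; i++) {
                    udelay(10);
                    if (!(reg_read() & MDI_COMMAND))
                            return 0;       /* cycle complete */
            }
            return -1;                      /* timed out: IXGBE_ERR_PHY */
    }

A full register read is then two such cycles, address then data, which is exactly the structure visible in ixgbe_read_phy_reg_generic() above.
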
7152 @@ -345,67 +337,54 @@ static s32 ixgbe_write_phy_reg(struct ix
7153 }
7154
7155 /**
7156 - * ixgbe_setup_tnx_phy_link - Set and restart autoneg
7157 + * ixgbe_setup_phy_link_generic - Set and restart autoneg
7158 * @hw: pointer to hardware structure
7159 *
7160 * Restart autonegotiation and PHY and waits for completion.
7161 **/
7162 -s32 ixgbe_setup_tnx_phy_link(struct ixgbe_hw *hw)
7163 +s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
7164 {
7165 s32 status = IXGBE_NOT_IMPLEMENTED;
7166 u32 time_out;
7167 u32 max_time_out = 10;
7168 - u16 autoneg_speed_selection_register = 0x10;
7169 - u16 autoneg_restart_mask = 0x0200;
7170 - u16 autoneg_complete_mask = 0x0020;
7171 - u16 autoneg_reg = 0;
7172 + u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
7173
7174 /*
7175 * Set advertisement settings in PHY based on autoneg_advertised
7176 * settings. If autoneg_advertised = 0, then advertise default values
7177 - * txn devices cannot be "forced" to a autoneg 10G and fail. But can
7178 + * tnx devices cannot be "forced" to autoneg 10G and fail, but they can
7179 * for a 1G.
7180 */
7181 - ixgbe_read_phy_reg(hw,
7182 - autoneg_speed_selection_register,
7183 - IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
7184 - &autoneg_reg);
7185 + hw->phy.ops.read_reg(hw, IXGBE_MII_SPEED_SELECTION_REG,
7186 + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
7187
7188 if (hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_1GB_FULL)
7189 autoneg_reg &= 0xEFFF; /* 0 in bit 12 is 1G operation */
7190 else
7191 autoneg_reg |= 0x1000; /* 1 in bit 12 is 10G/1G operation */
7192
7193 - ixgbe_write_phy_reg(hw,
7194 - autoneg_speed_selection_register,
7195 - IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
7196 - autoneg_reg);
7197 -
7198 + hw->phy.ops.write_reg(hw, IXGBE_MII_SPEED_SELECTION_REG,
7199 + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
7200
7201 /* Restart PHY autonegotiation and wait for completion */
7202 - ixgbe_read_phy_reg(hw,
7203 - IXGBE_MDIO_AUTO_NEG_CONTROL,
7204 - IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
7205 - &autoneg_reg);
7206 -
7207 - autoneg_reg |= autoneg_restart_mask;
7208 -
7209 - ixgbe_write_phy_reg(hw,
7210 - IXGBE_MDIO_AUTO_NEG_CONTROL,
7211 - IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
7212 - autoneg_reg);
7213 + hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
7214 + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
7215 +
7216 + autoneg_reg |= IXGBE_MII_RESTART;
7217 +
7218 + hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
7219 + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
7220
7221 /* Wait for autonegotiation to finish */
7222 for (time_out = 0; time_out < max_time_out; time_out++) {
7223 udelay(10);
7224 /* Restart PHY autonegotiation and wait for completion */
7225 - status = ixgbe_read_phy_reg(hw,
7226 - IXGBE_MDIO_AUTO_NEG_STATUS,
7227 - IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
7228 - &autoneg_reg);
7229 + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
7230 + IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
7231 + &autoneg_reg);
7232
7233 - autoneg_reg &= autoneg_complete_mask;
7234 - if (autoneg_reg == autoneg_complete_mask) {
7235 + autoneg_reg &= IXGBE_MII_AUTONEG_COMPLETE;
7236 + if (autoneg_reg == IXGBE_MII_AUTONEG_COMPLETE) {
7237 status = 0;
7238 break;
7239 }
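
ixgbe_setup_phy_link_generic() reduces to a read-modify-write of the autoneg control register followed by a completion poll on the status register, now expressed with the named MII constants instead of magic numbers. The two primitives in miniature (the PHY register file is a stand-in):

    #include <stdint.h>

    #define MII_RESTART             0x0200  /* IXGBE_MII_RESTART */
    #define MII_AUTONEG_COMPLETE    0x0020  /* IXGBE_MII_AUTONEG_COMPLETE */

    static uint16_t phy_regs[32];           /* stand-in PHY register file */
    static uint16_t phy_read(int reg)               { return phy_regs[reg]; }
    static void     phy_write(int reg, uint16_t v)  { phy_regs[reg] = v; }

    /* restart autonegotiation: read-modify-write the control register */
    static void autoneg_restart(int ctrl_reg)
    {
            phy_write(ctrl_reg, phy_read(ctrl_reg) | MII_RESTART);
    }

    /* completion test used inside the bounded wait loop */
    static int autoneg_done(int status_reg)
    {
            return (phy_read(status_reg) & MII_AUTONEG_COMPLETE) != 0;
    }
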
7240 @@ -418,64 +397,17 @@ s32 ixgbe_setup_tnx_phy_link(struct ixgb
7241 }
7242
7243 /**
7244 - * ixgbe_check_tnx_phy_link - Determine link and speed status
7245 - * @hw: pointer to hardware structure
7246 - *
7247 - * Reads the VS1 register to determine if link is up and the current speed for
7248 - * the PHY.
7249 - **/
7250 -s32 ixgbe_check_tnx_phy_link(struct ixgbe_hw *hw, u32 *speed,
7251 - bool *link_up)
7252 -{
7253 - s32 status = 0;
7254 - u32 time_out;
7255 - u32 max_time_out = 10;
7256 - u16 phy_link = 0;
7257 - u16 phy_speed = 0;
7258 - u16 phy_data = 0;
7259 -
7260 - /* Initialize speed and link to default case */
7261 - *link_up = false;
7262 - *speed = IXGBE_LINK_SPEED_10GB_FULL;
7263 -
7264 - /*
7265 - * Check current speed and link status of the PHY register.
7266 - * This is a vendor specific register and may have to
7267 - * be changed for other copper PHYs.
7268 - */
7269 - for (time_out = 0; time_out < max_time_out; time_out++) {
7270 - udelay(10);
7271 - if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) {
7272 - *link_up = true;
7273 - if (phy_speed ==
7274 - IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS)
7275 - *speed = IXGBE_LINK_SPEED_1GB_FULL;
7276 - break;
7277 - } else {
7278 - status = ixgbe_read_phy_reg(hw,
7279 - IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS,
7280 - IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
7281 - &phy_data);
7282 - phy_link = phy_data &
7283 - IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS;
7284 - phy_speed = phy_data &
7285 - IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS;
7286 - }
7287 - }
7288 -
7289 - return status;
7290 -}
7291 -
7292 -/**
7293 - * ixgbe_setup_tnx_phy_link_speed - Sets the auto advertised capabilities
7294 + * ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities
7295 * @hw: pointer to hardware structure
7296 * @speed: new link speed
7297 * @autoneg: true if autonegotiation enabled
7298 **/
7299 -s32 ixgbe_setup_tnx_phy_link_speed(struct ixgbe_hw *hw, u32 speed,
7300 - bool autoneg,
7301 - bool autoneg_wait_to_complete)
7302 +s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
7303 + ixgbe_link_speed speed,
7304 + bool autoneg,
7305 + bool autoneg_wait_to_complete)
7306 {
7307 +
7308 /*
7309 * Clear autoneg_advertised and set new values based on input link
7310 * speed.
7311 @@ -484,11 +416,13 @@ s32 ixgbe_setup_tnx_phy_link_speed(struc
7312
7313 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
7314 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
7315 +
7316 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
7317 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
7318
7319 /* Setup link based on the new speed settings */
7320 - ixgbe_setup_tnx_phy_link(hw);
7321 + hw->phy.ops.setup_link(hw);
7322
7323 return 0;
7324 }
7325 +
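
The tail of the file, setup_phy_link_speed_generic(), is the advertise-mask idiom: clear autoneg_advertised, OR in one bit per requested speed, then hand off to setup_link(). Reduced to a pure function it is just a filter of the request against the speeds the PHY offers (bit values here are illustrative):

    #define LINK_SPEED_1GB_FULL     0x0020u
    #define LINK_SPEED_10GB_FULL    0x0080u

    /* rebuild the advertised mask from scratch, as the driver does */
    static unsigned int build_advertised_mask(unsigned int requested)
    {
            unsigned int adv = 0;

            if (requested & LINK_SPEED_10GB_FULL)
                    adv |= LINK_SPEED_10GB_FULL;
            if (requested & LINK_SPEED_1GB_FULL)
                    adv |= LINK_SPEED_1GB_FULL;
            return adv;     /* equivalent to requested & (10G | 1G) */
    }
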
7326 --- a/drivers/net/ixgbe/ixgbe_phy.h
7327 +++ b/drivers/net/ixgbe/ixgbe_phy.h
7328 @@ -1,7 +1,7 @@
7329 /*******************************************************************************
7330
7331 Intel 10 Gigabit PCI Express Linux driver
7332 - Copyright(c) 1999 - 2007 Intel Corporation.
7333 + Copyright(c) 1999 - 2008 Intel Corporation.
7334
7335 This program is free software; you can redistribute it and/or modify it
7336 under the terms and conditions of the GNU General Public License,
7337 @@ -20,7 +20,6 @@
7338 the file called "COPYING".
7339
7340 Contact Information:
7341 - Linux NICS <linux.nics@intel.com>
7342 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
7343 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
7344
7345 @@ -30,20 +29,52 @@
7346 #define _IXGBE_PHY_H_
7347
7348 #include "ixgbe_type.h"
7349 +#define IXGBE_I2C_EEPROM_DEV_ADDR 0xA0
7350
7351 -s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw);
7352 -s32 ixgbe_check_phy_link(struct ixgbe_hw *hw, u32 *speed, bool *link_up);
7353 -s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, u32 speed, bool autoneg,
7354 - bool autoneg_wait_to_complete);
7355 -s32 ixgbe_identify_phy(struct ixgbe_hw *hw);
7356 -s32 ixgbe_reset_phy(struct ixgbe_hw *hw);
7357 -s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
7358 - u32 device_type, u16 *phy_data);
7359 -
7360 -/* PHY specific */
7361 -s32 ixgbe_setup_tnx_phy_link(struct ixgbe_hw *hw);
7362 -s32 ixgbe_check_tnx_phy_link(struct ixgbe_hw *hw, u32 *speed, bool *link_up);
7363 -s32 ixgbe_setup_tnx_phy_link_speed(struct ixgbe_hw *hw, u32 speed, bool autoneg,
7364 - bool autoneg_wait_to_complete);
7365 +/* EEPROM byte offsets */
7366 +#define IXGBE_SFF_IDENTIFIER 0x0
7367 +#define IXGBE_SFF_IDENTIFIER_SFP 0x3
7368 +#define IXGBE_SFF_VENDOR_OUI_BYTE0 0x25
7369 +#define IXGBE_SFF_VENDOR_OUI_BYTE1 0x26
7370 +#define IXGBE_SFF_VENDOR_OUI_BYTE2 0x27
7371 +#define IXGBE_SFF_1GBE_COMP_CODES 0x6
7372 +#define IXGBE_SFF_10GBE_COMP_CODES 0x3
7373 +#define IXGBE_SFF_TRANSMISSION_MEDIA 0x9
7374 +
7375 +/* Bitmasks */
7376 +#define IXGBE_SFF_TWIN_AX_CAPABLE 0x80
7377 +#define IXGBE_SFF_1GBASESX_CAPABLE 0x1
7378 +#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
7379 +#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
7380 +#define IXGBE_I2C_EEPROM_READ_MASK 0x100
7381 +#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3
7382 +#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0
7383 +#define IXGBE_I2C_EEPROM_STATUS_PASS 0x1
7384 +#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2
7385 +#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
7386 +
7387 +/* Bit-shift macros */
7388 +#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 12
7389 +#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 8
7390 +#define IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT 4
7391 +
7392 +/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */
7393 +#define IXGBE_SFF_VENDOR_OUI_TYCO 0x00407600
7394 +#define IXGBE_SFF_VENDOR_OUI_FTL 0x00906500
7395 +#define IXGBE_SFF_VENDOR_OUI_AVAGO 0x00176A00
7396 +
7397 +
7398 +s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
7399 +s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
7400 +s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
7401 +s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
7402 + u32 device_type, u16 *phy_data);
7403 +s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
7404 + u32 device_type, u16 phy_data);
7405 +s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw);
7406 +s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
7407 + ixgbe_link_speed speed,
7408 + bool autoneg,
7409 + bool autoneg_wait_to_complete);
7410
7411 #endif /* _IXGBE_PHY_H_ */
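
The new SFF constants in this header sketch the SFP identification flow: read the identifier and compatibility-code bytes from the module EEPROM at I2C address 0xA0, then reassemble the three vendor-OUI bytes into the documented 0x[byte0][byte1][byte2][00] layout and compare against the known vendors. Assuming the reads yield plain bytes (the driver's own shift constants differ because of how its I2C helper returns data), the packing is:

    #include <stdint.h>

    /* pack three OUI bytes as 0x[byte0][byte1][byte2][00] */
    static uint32_t sff_vendor_oui(uint8_t b0, uint8_t b1, uint8_t b2)
    {
            return ((uint32_t)b0 << 24) |
                   ((uint32_t)b1 << 16) |
                   ((uint32_t)b2 << 8);
    }

    /* e.g. sff_vendor_oui(0x00, 0x90, 0x65) == IXGBE_SFF_VENDOR_OUI_FTL */
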
7412 --- a/drivers/net/ixgbe/ixgbe_type.h
7413 +++ b/drivers/net/ixgbe/ixgbe_type.h
7414 @@ -1,7 +1,7 @@
7415 /*******************************************************************************
7416
7417 Intel 10 Gigabit PCI Express Linux driver
7418 - Copyright(c) 1999 - 2007 Intel Corporation.
7419 + Copyright(c) 1999 - 2008 Intel Corporation.
7420
7421 This program is free software; you can redistribute it and/or modify it
7422 under the terms and conditions of the GNU General Public License,
7423 @@ -20,7 +20,6 @@
7424 the file called "COPYING".
7425
7426 Contact Information:
7427 - Linux NICS <linux.nics@intel.com>
7428 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
7429 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
7430
7431 @@ -37,9 +36,9 @@
7432 /* Device IDs */
7433 #define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6
7434 #define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7
7435 -#define IXGBE_DEV_ID_82598AT_DUAL_PORT 0x10C8
7436 #define IXGBE_DEV_ID_82598EB_CX4 0x10DD
7437 #define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC
7438 +#define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4
7439
7440 /* General Registers */
7441 #define IXGBE_CTRL 0x00000
7442 @@ -70,11 +69,11 @@
7443 #define IXGBE_EIMC 0x00888
7444 #define IXGBE_EIAC 0x00810
7445 #define IXGBE_EIAM 0x00890
7446 -#define IXGBE_EITR(_i) (0x00820 + ((_i) * 4)) /* 0x820-0x86c */
7447 -#define IXGBE_IVAR(_i) (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */
7448 +#define IXGBE_EITR(_i) (((_i) <= 23) ? (0x00820 + ((_i) * 4)) : (0x012300 + ((_i) * 4)))
7449 +#define IXGBE_IVAR(_i) (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */
7450 #define IXGBE_MSIXT 0x00000 /* MSI-X Table. 0x0000 - 0x01C */
7451 #define IXGBE_MSIXPBA 0x02000 /* MSI-X Pending bit array */
7452 -#define IXGBE_PBACL 0x11068
7453 +#define IXGBE_PBACL(_i) (((_i) == 0) ? (0x11068) : (0x110C0 + ((_i) * 4)))
7454 #define IXGBE_GPIE 0x00898
7455
7456 /* Flow Control Registers */
7457 @@ -86,20 +85,33 @@
7458 #define IXGBE_TFCS 0x0CE00
7459
7460 /* Receive DMA Registers */
7461 -#define IXGBE_RDBAL(_i) (0x01000 + ((_i) * 0x40)) /* 64 of each (0-63)*/
7462 -#define IXGBE_RDBAH(_i) (0x01004 + ((_i) * 0x40))
7463 -#define IXGBE_RDLEN(_i) (0x01008 + ((_i) * 0x40))
7464 -#define IXGBE_RDH(_i) (0x01010 + ((_i) * 0x40))
7465 -#define IXGBE_RDT(_i) (0x01018 + ((_i) * 0x40))
7466 -#define IXGBE_RXDCTL(_i) (0x01028 + ((_i) * 0x40))
7467 -#define IXGBE_RSCCTL(_i) (0x0102C + ((_i) * 0x40))
7468 -#define IXGBE_SRRCTL(_i) (0x02100 + ((_i) * 4))
7469 - /* array of 16 (0x02100-0x0213C) */
7470 -#define IXGBE_DCA_RXCTRL(_i) (0x02200 + ((_i) * 4))
7471 - /* array of 16 (0x02200-0x0223C) */
7472 -#define IXGBE_RDRXCTL 0x02F00
7473 +#define IXGBE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : (0x0D000 + ((_i - 64) * 0x40)))
7474 +#define IXGBE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : (0x0D004 + ((_i - 64) * 0x40)))
7475 +#define IXGBE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : (0x0D008 + ((_i - 64) * 0x40)))
7476 +#define IXGBE_RDH(_i) (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : (0x0D010 + ((_i - 64) * 0x40)))
7477 +#define IXGBE_RDT(_i) (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : (0x0D018 + ((_i - 64) * 0x40)))
7478 +#define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : (0x0D028 + ((_i - 64) * 0x40)))
7479 +/*
7480 + * Split and Replication Receive Control Registers
7481 + * 00-15 : 0x02100 + n*4
7482 + * 16-63 : 0x01014 + n*0x40
7483 + * 64-127: 0x0D014 + (n-64)*0x40
7484 + */
7485 +#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \
7486 + (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \
7487 + (0x0D014 + ((_i - 64) * 0x40))))
7488 +/*
7489 + * Rx DCA Control Register:
7490 + * 00-15 : 0x02200 + n*4
7491 + * 16-63 : 0x0100C + n*0x40
7492 + * 64-127: 0x0D00C + (n-64)*0x40
7493 + */
7494 +#define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \
7495 + (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \
7496 + (0x0D00C + ((_i - 64) * 0x40))))
7497 +#define IXGBE_RDRXCTL 0x02F00
7498 #define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4))
7499 - /* 8 of these 0x03C00 - 0x03C1C */
7500 + /* 8 of these 0x03C00 - 0x03C1C */
7501 #define IXGBE_RXCTRL 0x03000
7502 #define IXGBE_DROPEN 0x03D04
7503 #define IXGBE_RXPBSIZE_SHIFT 10
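
The reworked Rx DMA macros encode a banked register map: queues 0-63 sit at 0x01000 + 0x40*n, queues 64-127 at 0x0D000 + 0x40*(n-64), and the ternary in each macro hides the split from every caller. The same computation as a plain function makes the layout explicit:

    #include <stdint.h>

    /* RDBAL address for Rx queue i, mirroring the two-bank IXGBE_RDBAL() macro */
    static uint32_t rdbal_addr(unsigned int i)
    {
            if (i < 64)
                    return 0x01000 + i * 0x40;      /* first bank: queues 0-63 */
            return 0x0D000 + (i - 64) * 0x40;       /* second bank: queues 64-127 */
    }

RDBAH, RDLEN, RDH, RDT and RXDCTL follow the same pattern with different base offsets.
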
7504 @@ -107,29 +119,32 @@
7505 /* Receive Registers */
7506 #define IXGBE_RXCSUM 0x05000
7507 #define IXGBE_RFCTL 0x05008
7508 +#define IXGBE_DRECCCTL 0x02F08
7509 +#define IXGBE_DRECCCTL_DISABLE 0
7510 +/* Multicast Table Array - 128 entries */
7511 #define IXGBE_MTA(_i) (0x05200 + ((_i) * 4))
7512 - /* Multicast Table Array - 128 entries */
7513 -#define IXGBE_RAL(_i) (0x05400 + ((_i) * 8)) /* 16 of these (0-15) */
7514 -#define IXGBE_RAH(_i) (0x05404 + ((_i) * 8)) /* 16 of these (0-15) */
7515 -#define IXGBE_PSRTYPE 0x05480
7516 - /* 0x5480-0x54BC Packet split receive type */
7517 +#define IXGBE_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : (0x0A200 + ((_i) * 8)))
7518 +#define IXGBE_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : (0x0A204 + ((_i) * 8)))
7519 +/* Packet split receive type */
7520 +#define IXGBE_PSRTYPE(_i) (((_i) <= 15) ? (0x05480 + ((_i) * 4)) : (0x0EA00 + ((_i) * 4)))
7521 +/* array of 4096 1-bit vlan filters */
7522 #define IXGBE_VFTA(_i) (0x0A000 + ((_i) * 4))
7523 - /* array of 4096 1-bit vlan filters */
7524 +/* array of 4096 4-bit vlan vmdq indices */
7525 #define IXGBE_VFTAVIND(_j, _i) (0x0A200 + ((_j) * 0x200) + ((_i) * 4))
7526 - /*array of 4096 4-bit vlan vmdq indicies */
7527 #define IXGBE_FCTRL 0x05080
7528 #define IXGBE_VLNCTRL 0x05088
7529 #define IXGBE_MCSTCTRL 0x05090
7530 #define IXGBE_MRQC 0x05818
7531 -#define IXGBE_VMD_CTL 0x0581C
7532 #define IXGBE_IMIR(_i) (0x05A80 + ((_i) * 4)) /* 8 of these (0-7) */
7533 #define IXGBE_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* 8 of these (0-7) */
7534 #define IXGBE_IMIRVP 0x05AC0
7535 +#define IXGBE_VMD_CTL 0x0581C
7536 #define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */
7537 #define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */
7538
7539 +
7540 /* Transmit DMA registers */
7541 -#define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40))/* 32 of these (0-31)*/
7542 +#define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40)) /* 32 of these (0-31)*/
7543 #define IXGBE_TDBAH(_i) (0x06004 + ((_i) * 0x40))
7544 #define IXGBE_TDLEN(_i) (0x06008 + ((_i) * 0x40))
7545 #define IXGBE_TDH(_i) (0x06010 + ((_i) * 0x40))
7546 @@ -138,11 +153,10 @@
7547 #define IXGBE_TDWBAL(_i) (0x06038 + ((_i) * 0x40))
7548 #define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40))
7549 #define IXGBE_DTXCTL 0x07E00
7550 -#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4))
7551 - /* there are 16 of these (0-15) */
7552 +
7553 +#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */
7554 #define IXGBE_TIPG 0x0CB00
7555 -#define IXGBE_TXPBSIZE(_i) (0x0CC00 + ((_i) *0x04))
7556 - /* there are 8 of these */
7557 +#define IXGBE_TXPBSIZE(_i) (0x0CC00 + ((_i) * 4)) /* 8 of these */
7558 #define IXGBE_MNGTXMAP 0x0CD10
7559 #define IXGBE_TIPG_FIBER_DEFAULT 3
7560 #define IXGBE_TXPBSIZE_SHIFT 10
7561 @@ -154,6 +168,7 @@
7562 #define IXGBE_IPAV 0x05838
7563 #define IXGBE_IP4AT 0x05840 /* IPv4 table 0x5840-0x5858 */
7564 #define IXGBE_IP6AT 0x05880 /* IPv6 table 0x5880-0x588F */
7565 +
7566 #define IXGBE_WUPL 0x05900
7567 #define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */
7568 #define IXGBE_FHFT 0x09000 /* Flex host filter table 9000-93FC */
7569 @@ -170,6 +185,8 @@
7570 #define IXGBE_TDPT2TCCR(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */
7571 #define IXGBE_TDPT2TCSR(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */
7572
7573 +
7574 +
7575 /* Stats registers */
7576 #define IXGBE_CRCERRS 0x04000
7577 #define IXGBE_ILLERRC 0x04004
7578 @@ -224,7 +241,7 @@
7579 #define IXGBE_XEC 0x04120
7580
7581 #define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4)) /* 16 of these */
7582 -#define IXGBE_TQSMR(_i) (0x07300 + ((_i) * 4)) /* 8 of these */
7583 +#define IXGBE_TQSMR(_i) (((_i) <= 7) ? (0x07300 + ((_i) * 4)) : (0x08600 + ((_i) * 4)))
7584
7585 #define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */
7586 #define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */
7587 @@ -275,23 +292,17 @@
7588 #define IXGBE_DCA_CTRL 0x11074
7589
7590 /* Diagnostic Registers */
7591 -#define IXGBE_RDSTATCTL 0x02C20
7592 -#define IXGBE_RDSTAT(_i) (0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */
7593 -#define IXGBE_RDHMPN 0x02F08
7594 -#define IXGBE_RIC_DW0 0x02F10
7595 -#define IXGBE_RIC_DW1 0x02F14
7596 -#define IXGBE_RIC_DW2 0x02F18
7597 -#define IXGBE_RIC_DW3 0x02F1C
7598 -#define IXGBE_RDPROBE 0x02F20
7599 -#define IXGBE_TDSTATCTL 0x07C20
7600 -#define IXGBE_TDSTAT(_i) (0x07C00 + ((_i) * 4)) /* 0x07C00 - 0x07C1C */
7601 -#define IXGBE_TDHMPN 0x07F08
7602 -#define IXGBE_TIC_DW0 0x07F10
7603 -#define IXGBE_TIC_DW1 0x07F14
7604 -#define IXGBE_TIC_DW2 0x07F18
7605 -#define IXGBE_TIC_DW3 0x07F1C
7606 -#define IXGBE_TDPROBE 0x07F20
7607 -#define IXGBE_TXBUFCTRL 0x0C600
7608 +#define IXGBE_RDSTATCTL 0x02C20
7609 +#define IXGBE_RDSTAT(_i) (0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */
7610 +#define IXGBE_RDHMPN 0x02F08
7611 +#define IXGBE_RIC_DW(_i) (0x02F10 + ((_i) * 4))
7612 +#define IXGBE_RDPROBE 0x02F20
7613 +#define IXGBE_TDSTATCTL 0x07C20
7614 +#define IXGBE_TDSTAT(_i) (0x07C00 + ((_i) * 4)) /* 0x07C00 - 0x07C1C */
7615 +#define IXGBE_TDHMPN 0x07F08
7616 +#define IXGBE_TIC_DW(_i) (0x07F10 + ((_i) * 4))
7617 +#define IXGBE_TDPROBE 0x07F20
7618 +#define IXGBE_TXBUFCTRL 0x0C600
7619 #define IXGBE_TXBUFDATA0 0x0C610
7620 #define IXGBE_TXBUFDATA1 0x0C614
7621 #define IXGBE_TXBUFDATA2 0x0C618
7622 @@ -356,12 +367,10 @@
7623 #define IXGBE_ANLP2 0x042B4
7624 #define IXGBE_ATLASCTL 0x04800
7625
7626 -/* RSCCTL Bit Masks */
7627 -#define IXGBE_RSCCTL_RSCEN 0x01
7628 -#define IXGBE_RSCCTL_MAXDESC_1 0x00
7629 -#define IXGBE_RSCCTL_MAXDESC_4 0x04
7630 -#define IXGBE_RSCCTL_MAXDESC_8 0x08
7631 -#define IXGBE_RSCCTL_MAXDESC_16 0x0C
7632 +/* RDRXCTL Bit Masks */
7633 +#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000 /* Rx Desc Min Threshold Size */
7634 +#define IXGBE_RDRXCTL_MVMEN 0x00000020
7635 +#define IXGBE_RDRXCTL_DMAIDONE 0x00000008 /* DMA init cycle done */
7636
7637 /* CTRL Bit Masks */
7638 #define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Master Disable bit */
7639 @@ -394,7 +403,7 @@
7640
7641 #define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
7642 #define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
7643 -#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* TX Desc writeback RO bit */
7644 +#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
7645 #define IXGBE_DCA_MAX_QUEUES_82598 16 /* DCA regs only on 16 queues */
7646
7647 /* MSCA Bit Masks */
7648 @@ -418,10 +427,10 @@
7649 #define IXGBE_MSCA_MDI_IN_PROG_EN 0x80000000 /* MDI in progress enable */
7650
7651 /* MSRWD bit masks */
7652 -#define IXGBE_MSRWD_WRITE_DATA_MASK 0x0000FFFF
7653 -#define IXGBE_MSRWD_WRITE_DATA_SHIFT 0
7654 -#define IXGBE_MSRWD_READ_DATA_MASK 0xFFFF0000
7655 -#define IXGBE_MSRWD_READ_DATA_SHIFT 16
7656 +#define IXGBE_MSRWD_WRITE_DATA_MASK 0x0000FFFF
7657 +#define IXGBE_MSRWD_WRITE_DATA_SHIFT 0
7658 +#define IXGBE_MSRWD_READ_DATA_MASK 0xFFFF0000
7659 +#define IXGBE_MSRWD_READ_DATA_SHIFT 16
7660
7661 /* Atlas registers */
7662 #define IXGBE_ATLAS_PDN_LPBK 0x24
7663 @@ -436,6 +445,7 @@
7664 #define IXGBE_ATLAS_PDN_TX_1G_QL_ALL 0xF0
7665 #define IXGBE_ATLAS_PDN_TX_AN_QL_ALL 0xF0
7666
7667 +
7668 /* Device Type definitions for new protocol MDIO commands */
7669 #define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1
7670 #define IXGBE_MDIO_PCS_DEV_TYPE 0x3
7671 @@ -443,6 +453,8 @@
7672 #define IXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7
7673 #define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Device 30 */
7674
7675 +#define IXGBE_MDIO_COMMAND_TIMEOUT 100 /* PHY Timeout for 1 GB mode */
7676 +
7677 #define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL 0x0 /* VS1 Control Reg */
7678 #define IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS 0x1 /* VS1 Status Reg */
7679 #define IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS 0x0008 /* 1 = Link Up */
7680 @@ -456,23 +468,39 @@
7681 #define IXGBE_MDIO_PHY_XS_RESET 0x8000 /* PHY_XS Reset */
7682 #define IXGBE_MDIO_PHY_ID_HIGH 0x2 /* PHY ID High Reg*/
7683 #define IXGBE_MDIO_PHY_ID_LOW 0x3 /* PHY ID Low Reg*/
7684 -#define IXGBE_MDIO_PHY_SPEED_ABILITY 0x4 /* Speed Abilty Reg */
7685 +#define IXGBE_MDIO_PHY_SPEED_ABILITY 0x4 /* Speed Ability Reg */
7686 #define IXGBE_MDIO_PHY_SPEED_10G 0x0001 /* 10G capable */
7687 #define IXGBE_MDIO_PHY_SPEED_1G 0x0010 /* 1G capable */
7688
7689 +#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Address Reg */
7690 +#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */
7691 +#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */
7692 +
7693 +/* MII clause 22/28 definitions */
7694 +#define IXGBE_MDIO_PHY_LOW_POWER_MODE 0x0800
7695 +
7696 +#define IXGBE_MII_SPEED_SELECTION_REG 0x10
7697 +#define IXGBE_MII_RESTART 0x200
7698 +#define IXGBE_MII_AUTONEG_COMPLETE 0x20
7699 +#define IXGBE_MII_AUTONEG_REG 0x0
7700 +
7701 #define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0
7702 #define IXGBE_MAX_PHY_ADDR 32
7703
7704 /* PHY IDs*/
7705 -#define TN1010_PHY_ID 0x00A19410
7706 #define QT2022_PHY_ID 0x0043A400
7707
7708 +/* PHY Types */
7709 +#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0
7710 +
7711 /* General purpose Interrupt Enable */
7712 -#define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */
7713 -#define IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */
7714 -#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */
7715 -#define IXGBE_GPIE_EIAME 0x40000000
7716 -#define IXGBE_GPIE_PBA_SUPPORT 0x80000000
7717 +#define IXGBE_SDP0_GPIEN 0x00000001 /* SDP0 */
7718 +#define IXGBE_SDP1_GPIEN 0x00000002 /* SDP1 */
7719 +#define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */
7720 +#define IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */
7721 +#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */
7722 +#define IXGBE_GPIE_EIAME 0x40000000
7723 +#define IXGBE_GPIE_PBA_SUPPORT 0x80000000
7724
7725 /* Transmit Flow Control status */
7726 #define IXGBE_TFCS_TXOFF 0x00000001
7727 @@ -533,7 +561,7 @@
7728 #define IXGBE_PAP_TXPAUSECNT_MASK 0x0000FFFF /* Pause counter mask */
7729
7730 /* RMCS Bit Masks */
7731 -#define IXGBE_RMCS_RRM 0x00000002 /* Receive Recylce Mode enable */
7732 +#define IXGBE_RMCS_RRM 0x00000002 /* Receive Recycle Mode enable */
7733 /* Receive Arbitration Control: 0 Round Robin, 1 DFP */
7734 #define IXGBE_RMCS_RAC 0x00000004
7735 #define IXGBE_RMCS_DFP IXGBE_RMCS_RAC /* Deficit Fixed Priority ena */
7736 @@ -541,12 +569,15 @@
7737 #define IXGBE_RMCS_TFCE_PRIORITY 0x00000010 /* Tx Priority flow control ena */
7738 #define IXGBE_RMCS_ARBDIS 0x00000040 /* Arbitration disable bit */
7739
7740 +
7741 /* Interrupt register bitmasks */
7742
7743 /* Extended Interrupt Cause Read */
7744 #define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */
7745 #define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */
7746 -#define IXGBE_EICR_MNG 0x00400000 /* Managability Event Interrupt */
7747 +#define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */
7748 +#define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */
7749 +#define IXGBE_EICR_GPI_SDP1 0x02000000 /* Gen Purpose Interrupt on SDP1 */
7750 #define IXGBE_EICR_PBUR 0x10000000 /* Packet Buffer Handler Error */
7751 #define IXGBE_EICR_DHER 0x20000000 /* Descriptor Handler Error */
7752 #define IXGBE_EICR_TCP_TIMER 0x40000000 /* TCP Timer */
7753 @@ -554,11 +585,12 @@
7754
7755 /* Extended Interrupt Cause Set */
7756 #define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
7757 -#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */
7758 -#define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */
7759 -#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
7760 -#define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Error */
7761 -#define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */
7762 +#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */
7763 +#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
7764 +#define IXGBE_EICS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
7765 +#define IXGBE_EICS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
7766 +#define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
7767 +#define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */
7768 #define IXGBE_EICS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
7769 #define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
7770
7771 @@ -566,7 +598,9 @@
7772 #define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
7773 #define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */
7774 #define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
7775 -#define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Error */
7776 +#define IXGBE_EIMS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
7777 +#define IXGBE_EIMS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
7778 +#define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
7779 #define IXGBE_EIMS_DHER IXGBE_EICR_DHER /* Descr Handler Error */
7780 #define IXGBE_EIMS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
7781 #define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
7782 @@ -575,18 +609,20 @@
7783 #define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
7784 #define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */
7785 #define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
7786 -#define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Error */
7787 -#define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Error */
7788 +#define IXGBE_EIMC_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
7789 +#define IXGBE_EIMC_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
7790 +#define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
7791 +#define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Err */
7792 #define IXGBE_EIMC_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
7793 #define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
7794
7795 -#define IXGBE_EIMS_ENABLE_MASK (\
7796 - IXGBE_EIMS_RTX_QUEUE | \
7797 - IXGBE_EIMS_LSC | \
7798 - IXGBE_EIMS_TCP_TIMER | \
7799 - IXGBE_EIMS_OTHER)
7800 +#define IXGBE_EIMS_ENABLE_MASK ( \
7801 + IXGBE_EIMS_RTX_QUEUE | \
7802 + IXGBE_EIMS_LSC | \
7803 + IXGBE_EIMS_TCP_TIMER | \
7804 + IXGBE_EIMS_OTHER)
7805
7806 -/* Immediate Interrupt RX (A.K.A. Low Latency Interrupt) */
7807 +/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
7808 #define IXGBE_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */
7809 #define IXGBE_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */
7810 #define IXGBE_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */
7811 @@ -623,6 +659,7 @@
7812 #define IXGBE_VLNCTRL_VFE 0x40000000 /* bit 30 */
7813 #define IXGBE_VLNCTRL_VME 0x80000000 /* bit 31 */
7814
7815 +
7816 #define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */
7817
7818 /* STATUS Bit Masks */
7819 @@ -670,16 +707,16 @@
7820 #define IXGBE_AUTOC_AN_RESTART 0x00001000
7821 #define IXGBE_AUTOC_FLU 0x00000001
7822 #define IXGBE_AUTOC_LMS_SHIFT 13
7823 -#define IXGBE_AUTOC_LMS_MASK (0x7 << IXGBE_AUTOC_LMS_SHIFT)
7824 -#define IXGBE_AUTOC_LMS_1G_LINK_NO_AN (0x0 << IXGBE_AUTOC_LMS_SHIFT)
7825 -#define IXGBE_AUTOC_LMS_10G_LINK_NO_AN (0x1 << IXGBE_AUTOC_LMS_SHIFT)
7826 -#define IXGBE_AUTOC_LMS_1G_AN (0x2 << IXGBE_AUTOC_LMS_SHIFT)
7827 -#define IXGBE_AUTOC_LMS_KX4_AN (0x4 << IXGBE_AUTOC_LMS_SHIFT)
7828 -#define IXGBE_AUTOC_LMS_KX4_AN_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT)
7829 -#define IXGBE_AUTOC_LMS_ATTACH_TYPE (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
7830 +#define IXGBE_AUTOC_LMS_MASK (0x7 << IXGBE_AUTOC_LMS_SHIFT)
7831 +#define IXGBE_AUTOC_LMS_1G_LINK_NO_AN (0x0 << IXGBE_AUTOC_LMS_SHIFT)
7832 +#define IXGBE_AUTOC_LMS_10G_LINK_NO_AN (0x1 << IXGBE_AUTOC_LMS_SHIFT)
7833 +#define IXGBE_AUTOC_LMS_1G_AN (0x2 << IXGBE_AUTOC_LMS_SHIFT)
7834 +#define IXGBE_AUTOC_LMS_KX4_AN (0x4 << IXGBE_AUTOC_LMS_SHIFT)
7835 +#define IXGBE_AUTOC_LMS_KX4_AN_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT)
7836 +#define IXGBE_AUTOC_LMS_ATTACH_TYPE (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
7837
7838 -#define IXGBE_AUTOC_1G_PMA_PMD 0x00000200
7839 -#define IXGBE_AUTOC_10G_PMA_PMD 0x00000180
7840 +#define IXGBE_AUTOC_1G_PMA_PMD 0x00000200
7841 +#define IXGBE_AUTOC_10G_PMA_PMD 0x00000180
7842 #define IXGBE_AUTOC_10G_PMA_PMD_SHIFT 7
7843 #define IXGBE_AUTOC_1G_PMA_PMD_SHIFT 9
7844 #define IXGBE_AUTOC_10G_XAUI (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
7845 @@ -705,6 +742,7 @@
7846 #define IXGBE_LINKS_TL_FAULT 0x00001000
7847 #define IXGBE_LINKS_SIGNAL 0x00000F00
7848
7849 +#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */
7850 #define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */
7851
7852 /* SW Semaphore Register bitmasks */
7853 @@ -759,6 +797,11 @@
7854 #define IXGBE_PBANUM0_PTR 0x15
7855 #define IXGBE_PBANUM1_PTR 0x16
7856
7857 +/* Legacy EEPROM word offsets */
7858 +#define IXGBE_ISCSI_BOOT_CAPS 0x0033
7859 +#define IXGBE_ISCSI_SETUP_PORT_0 0x0030
7860 +#define IXGBE_ISCSI_SETUP_PORT_1 0x0034
7861 +
7862 /* EEPROM Commands - SPI */
7863 #define IXGBE_EEPROM_MAX_RETRY_SPI 5000 /* Max wait 5ms for RDY signal */
7864 #define IXGBE_EEPROM_STATUS_RDY_SPI 0x01
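
/*
 * Usage sketch (illustrative, not part of the patch): the new legacy
 * EEPROM word offsets are meant to be passed to the EEPROM read hook
 * declared later in this header (s32 (*read)(struct ixgbe_hw *, u16,
 * u16 *)). Error handling is elided and the variable name is
 * hypothetical.
 */
u16 iscsi_caps = 0;

hw->eeprom.ops.read(hw, IXGBE_ISCSI_BOOT_CAPS, &iscsi_caps);
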
7865 @@ -766,7 +809,7 @@
7866 #define IXGBE_EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */
7867 #define IXGBE_EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = addr bit-8 */
7868 #define IXGBE_EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Ena latch */
7869 -/* EEPROM reset Write Enbale latch */
7870 +/* EEPROM reset Write Enable latch */
7871 #define IXGBE_EEPROM_WRDI_OPCODE_SPI 0x04
7872 #define IXGBE_EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status reg */
7873 #define IXGBE_EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status reg */
7874 @@ -805,26 +848,20 @@
7875 /* Number of 100 microseconds we wait for PCI Express master disable */
7876 #define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800
7877
7878 -/* PHY Types */
7879 -#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0
7880 -
7881 /* Check whether address is multicast. This is little-endian specific check.*/
7882 #define IXGBE_IS_MULTICAST(Address) \
7883 - (bool)(((u8 *)(Address))[0] & ((u8)0x01))
7884 + (bool)(((u8 *)(Address))[0] & ((u8)0x01))
7885
7886 /* Check whether an address is broadcast. */
7887 #define IXGBE_IS_BROADCAST(Address) \
7888 - ((((u8 *)(Address))[0] == ((u8)0xff)) && \
7889 - (((u8 *)(Address))[1] == ((u8)0xff)))
7890 + ((((u8 *)(Address))[0] == ((u8)0xff)) && \
7891 + (((u8 *)(Address))[1] == ((u8)0xff)))
7892
7893 /* RAH */
7894 #define IXGBE_RAH_VIND_MASK 0x003C0000
7895 #define IXGBE_RAH_VIND_SHIFT 18
7896 #define IXGBE_RAH_AV 0x80000000
7897 -
7898 -/* Filters */
7899 -#define IXGBE_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */
7900 -#define IXGBE_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
7901 +#define IXGBE_CLEAR_VMDQ_ALL 0xFFFFFFFF
7902
7903 /* Header split receive */
7904 #define IXGBE_RFCTL_ISCSI_DIS 0x00000001
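
/*
 * Usage sketch (illustrative, not part of the patch): both macros
 * take a pointer to the first byte of a MAC address; note the
 * broadcast test only inspects the first two octets. The sample
 * address is a hypothetical IPv4 multicast MAC.
 */
u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
bool mcast = IXGBE_IS_MULTICAST(addr);	/* true: I/G bit set in octet 0 */
bool bcast = IXGBE_IS_BROADCAST(addr);	/* false: octets 0-1 are not 0xff */
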
7905 @@ -853,7 +890,7 @@
7906 #define IXGBE_MAX_FRAME_SZ 0x40040000
7907
7908 #define IXGBE_TDWBAL_HEAD_WB_ENABLE 0x1 /* Tx head write-back enable */
7909 -#define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2 /* Tx seq. # write-back enable */
7910 +#define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2 /* Tx seq# write-back enable */
7911
7912 /* Receive Config masks */
7913 #define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */
7914 @@ -866,7 +903,7 @@
7915 #define IXGBE_FCTRL_BAM 0x00000400 /* Broadcast Accept Mode */
7916 #define IXGBE_FCTRL_PMCF 0x00001000 /* Pass MAC Control Frames */
7917 #define IXGBE_FCTRL_DPF 0x00002000 /* Discard Pause Frame */
7918 -/* Receive Priority Flow Control Enbale */
7919 +/* Receive Priority Flow Control Enable */
7920 #define IXGBE_FCTRL_RPFCE 0x00004000
7921 #define IXGBE_FCTRL_RFCE 0x00008000 /* Receive Flow Control Ena */
7922
7923 @@ -896,9 +933,8 @@
7924 /* Receive Descriptor bit definitions */
7925 #define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */
7926 #define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */
7927 -#define IXGBE_RXD_STAT_IXSM 0x04 /* Ignore checksum */
7928 #define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
7929 -#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum caculated */
7930 +#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
7931 #define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */
7932 #define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
7933 #define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */
7934 @@ -914,7 +950,7 @@
7935 #define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */
7936 #define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */
7937 #define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */
7938 -#define IXGBE_RXDADV_HBO 0x00800000
7939 +#define IXGBE_RXDADV_ERR_HBO 0x00800000 /* Header Buffer Overflow */
7940 #define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */
7941 #define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */
7942 #define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */
7943 @@ -928,15 +964,17 @@
7944 #define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */
7945 #define IXGBE_RXD_CFI_SHIFT 12
7946
7947 +
7948 /* SRRCTL bit definitions */
7949 -#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */
7950 -#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F
7951 -#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00
7952 -#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000
7953 +#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */
7954 +#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F
7955 +#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00
7956 +#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000
7957 #define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
7958 #define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000
7959 #define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
7960 #define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
7961 +#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000
7962
7963 #define IXGBE_RXDPS_HDRSTAT_HDRSP 0x00008000
7964 #define IXGBE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF
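
/*
 * Usage sketch (illustrative, not part of the patch): the new
 * DESCTYPE mask lets Rx setup clear the descriptor-type field before
 * selecting a mode instead of relying on the reset value.
 * IXGBE_READ_REG()/IXGBE_WRITE_REG() and the per-queue
 * IXGBE_SRRCTL(index) offset are assumed from the driver headers;
 * "queue" is a hypothetical ring index.
 */
u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(queue));

srrctl &= ~IXGBE_SRRCTL_DESCTYPE_MASK;
srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(queue), srrctl);
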
7965 @@ -970,21 +1008,20 @@
7966 #define IXGBE_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */
7967 #define IXGBE_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */
7968 #define IXGBE_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */
7969 -
7970 /* Masks to determine if packets should be dropped due to frame errors */
7971 -#define IXGBE_RXD_ERR_FRAME_ERR_MASK (\
7972 - IXGBE_RXD_ERR_CE | \
7973 - IXGBE_RXD_ERR_LE | \
7974 - IXGBE_RXD_ERR_PE | \
7975 - IXGBE_RXD_ERR_OSE | \
7976 - IXGBE_RXD_ERR_USE)
7977 -
7978 -#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK (\
7979 - IXGBE_RXDADV_ERR_CE | \
7980 - IXGBE_RXDADV_ERR_LE | \
7981 - IXGBE_RXDADV_ERR_PE | \
7982 - IXGBE_RXDADV_ERR_OSE | \
7983 - IXGBE_RXDADV_ERR_USE)
7984 +#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
7985 + IXGBE_RXD_ERR_CE | \
7986 + IXGBE_RXD_ERR_LE | \
7987 + IXGBE_RXD_ERR_PE | \
7988 + IXGBE_RXD_ERR_OSE | \
7989 + IXGBE_RXD_ERR_USE)
7990 +
7991 +#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \
7992 + IXGBE_RXDADV_ERR_CE | \
7993 + IXGBE_RXDADV_ERR_LE | \
7994 + IXGBE_RXDADV_ERR_PE | \
7995 + IXGBE_RXDADV_ERR_OSE | \
7996 + IXGBE_RXDADV_ERR_USE)
7997
7998 /* Multicast bit mask */
7999 #define IXGBE_MCSTCTRL_MFE 0x4
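
/*
 * Usage sketch (illustrative, not part of the patch): the Rx clean
 * loop can test the grouped mask once and drop the frame on any of
 * the five error bits. "staterr" is assumed to hold the CPU-order
 * status/error dword of an advanced Rx write-back descriptor; skb
 * and the next_desc label are hypothetical.
 */
if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
	dev_kfree_skb_irq(skb);	/* discard the damaged frame */
	goto next_desc;
}
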
8000 @@ -1000,6 +1037,7 @@
8001 #define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */
8002 #define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT
8003
8004 +
8005 /* Transmit Descriptor - Legacy */
8006 struct ixgbe_legacy_tx_desc {
8007 u64 buffer_addr; /* Address of the descriptor's data buffer */
8008 @@ -1007,15 +1045,15 @@ struct ixgbe_legacy_tx_desc {
8009 __le32 data;
8010 struct {
8011 __le16 length; /* Data buffer length */
8012 - u8 cso; /* Checksum offset */
8013 - u8 cmd; /* Descriptor control */
8014 + u8 cso; /* Checksum offset */
8015 + u8 cmd; /* Descriptor control */
8016 } flags;
8017 } lower;
8018 union {
8019 __le32 data;
8020 struct {
8021 - u8 status; /* Descriptor status */
8022 - u8 css; /* Checksum start */
8023 + u8 status; /* Descriptor status */
8024 + u8 css; /* Checksum start */
8025 __le16 vlan;
8026 } fields;
8027 } upper;
8028 @@ -1024,7 +1062,7 @@ struct ixgbe_legacy_tx_desc {
8029 /* Transmit Descriptor - Advanced */
8030 union ixgbe_adv_tx_desc {
8031 struct {
8032 - __le64 buffer_addr; /* Address of descriptor's data buf */
8033 + __le64 buffer_addr; /* Address of descriptor's data buf */
8034 __le32 cmd_type_len;
8035 __le32 olinfo_status;
8036 } read;
8037 @@ -1039,9 +1077,9 @@ union ixgbe_adv_tx_desc {
8038 struct ixgbe_legacy_rx_desc {
8039 __le64 buffer_addr; /* Address of the descriptor's data buffer */
8040 __le16 length; /* Length of data DMAed into data buffer */
8041 - u16 csum; /* Packet checksum */
8042 - u8 status; /* Descriptor status */
8043 - u8 errors; /* Descriptor Errors */
8044 + __le16 csum; /* Packet checksum */
8045 + u8 status; /* Descriptor status */
8046 + u8 errors; /* Descriptor Errors */
8047 __le16 vlan;
8048 };
8049
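
/*
 * Usage sketch (illustrative, not part of the patch): csum, like the
 * other multi-byte descriptor fields, is written by the device in
 * little-endian order, which is what the u16 -> __le16 change above
 * annotates; readers must convert before using the value on
 * big-endian hosts. "rx_desc" is a hypothetical pointer to a
 * struct ixgbe_legacy_rx_desc.
 */
u16 csum = le16_to_cpu(rx_desc->csum);
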
8050 @@ -1053,15 +1091,18 @@ union ixgbe_adv_rx_desc {
8051 } read;
8052 struct {
8053 struct {
8054 - struct {
8055 - __le16 pkt_info; /* RSS type, Packet type */
8056 - __le16 hdr_info; /* Split Header, header len */
8057 + union {
8058 + __le32 data;
8059 + struct {
8060 + __le16 pkt_info; /* RSS, Pkt type */
8061 + __le16 hdr_info; /* Splithdr, hdrlen */
8062 + } hs_rss;
8063 } lo_dword;
8064 union {
8065 __le32 rss; /* RSS Hash */
8066 struct {
8067 __le16 ip_id; /* IP id */
8068 - u16 csum; /* Packet Checksum */
8069 + __le16 csum; /* Packet Checksum */
8070 } csum_ip;
8071 } hi_dword;
8072 } lower;
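
/*
 * Usage sketch (illustrative, not part of the patch): the added data
 * member lets the whole lower dword be read at once, while the
 * hs_rss view keeps the split-header fields addressable, e.g. for
 * extracting the header length on packet-split receives. The
 * HDRBUFLEN mask/shift are assumed from elsewhere in this header and
 * "rx_desc" is a hypothetical pointer to a union ixgbe_adv_rx_desc.
 */
u16 hdr_info = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info);
u16 hlen = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
	   IXGBE_RXDADV_HDRBUFLEN_SHIFT;
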
8073 @@ -1082,49 +1123,69 @@ struct ixgbe_adv_tx_context_desc {
8074 };
8075
8076 /* Adv Transmit Descriptor Config Masks */
8077 -#define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buffer length(bytes) */
8078 +#define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buf length(bytes) */
8079 #define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */
8080 #define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */
8081 #define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
8082 #define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */
8083 #define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */
8084 -#define IXGBE_ADVTXD_DCMD_RDMA 0x04000000 /* RDMA */
8085 #define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */
8086 -#define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */
8087 +#define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */
8088 #define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */
8089 #define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */
8090 #define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
8091 #define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */
8092 -#define IXGBE_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED present in WB */
8093 +#define IXGBE_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED pres in WB */
8094 #define IXGBE_ADVTXD_STAT_RSV 0x0000000C /* STA Reserved */
8095 #define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */
8096 +#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */
8097 #define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */
8098 #define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \
8099 - IXGBE_ADVTXD_POPTS_SHIFT)
8100 + IXGBE_ADVTXD_POPTS_SHIFT)
8101 #define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \
8102 - IXGBE_ADVTXD_POPTS_SHIFT)
8103 -#define IXGBE_ADVTXD_POPTS_EOM 0x00000400 /* Enable L bit-RDMA DDP hdr */
8104 -#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */
8105 -#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */
8106 -#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */
8107 -#define IXGBE_ADVTXD_POPTS_ISCO_FULL 0x00001800 /* 1st&Last TSO-full iSCSI PDU*/
8108 -#define IXGBE_ADVTXD_POPTS_RSV 0x00002000 /* POPTS Reserved */
8109 -#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
8110 -#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
8111 -#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */
8112 -#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
8113 -#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */
8114 -#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */
8115 -#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
8116 -#define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /* Req requires Markers and CRC */
8117 -#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
8118 -#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
8119 + IXGBE_ADVTXD_POPTS_SHIFT)
8120 +#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */
8121 +#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */
8122 +#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */
8123 +#define IXGBE_ADVTXD_POPTS_ISCO_FULL 0x00001800 /* 1st&Last TSO-full iSCSI PDU */
8124 +#define IXGBE_ADVTXD_POPTS_RSV 0x00002000 /* POPTS Reserved */
8125 +#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
8126 +#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
8127 +#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */
8128 +#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
8129 +#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */
8130 +#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */
8131 +#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
8132 +#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */
8133 +#define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /* Req requires Markers and CRC */
8134 +#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
8135 +#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
8136
8137 +/* Autonegotiation advertised speeds */
8138 +typedef u32 ixgbe_autoneg_advertised;
8139 /* Link speed */
8140 +typedef u32 ixgbe_link_speed;
8141 #define IXGBE_LINK_SPEED_UNKNOWN 0
8142 #define IXGBE_LINK_SPEED_100_FULL 0x0008
8143 #define IXGBE_LINK_SPEED_1GB_FULL 0x0020
8144 #define IXGBE_LINK_SPEED_10GB_FULL 0x0080
8145 +#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \
8146 + IXGBE_LINK_SPEED_10GB_FULL)
8147 +
8148 +/* Physical layer type */
8149 +typedef u32 ixgbe_physical_layer;
8150 +#define IXGBE_PHYSICAL_LAYER_UNKNOWN 0
8151 +#define IXGBE_PHYSICAL_LAYER_10GBASE_T 0x0001
8152 +#define IXGBE_PHYSICAL_LAYER_1000BASE_T 0x0002
8153 +#define IXGBE_PHYSICAL_LAYER_100BASE_T 0x0004
8154 +#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x0008
8155 +#define IXGBE_PHYSICAL_LAYER_10GBASE_LR 0x0010
8156 +#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x0020
8157 +#define IXGBE_PHYSICAL_LAYER_10GBASE_SR 0x0040
8158 +#define IXGBE_PHYSICAL_LAYER_10GBASE_KX4 0x0080
8159 +#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4 0x0100
8160 +#define IXGBE_PHYSICAL_LAYER_1000BASE_KX 0x0200
8161 +#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x0400
8162
8163
8164 enum ixgbe_eeprom_type {
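
/*
 * Usage sketch (illustrative, not part of the patch): a TCP/IPv4 TSO
 * setup ORs the TUCMD type bits into the context descriptor together
 * with the context descriptor type itself. "ctxt" is a hypothetical
 * pointer to the struct ixgbe_adv_tx_context_desc defined above.
 */
u32 type_tucmd = IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;

type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4 | IXGBE_ADVTXD_TUCMD_L4T_TCP;
ctxt->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
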
8165 @@ -1141,16 +1202,38 @@ enum ixgbe_mac_type {
8166
8167 enum ixgbe_phy_type {
8168 ixgbe_phy_unknown = 0,
8169 - ixgbe_phy_tn,
8170 ixgbe_phy_qt,
8171 - ixgbe_phy_xaui
8172 + ixgbe_phy_xaui,
8173 + ixgbe_phy_tw_tyco,
8174 + ixgbe_phy_tw_unknown,
8175 + ixgbe_phy_sfp_avago,
8176 + ixgbe_phy_sfp_ftl,
8177 + ixgbe_phy_sfp_unknown,
8178 + ixgbe_phy_generic
8179 +};
8180 +
8181 +/*
8182 + * SFP+ module type IDs:
8183 + *
8184 + * ID Module Type
8185 + * =============
8186 + * 0 SFP_DA_CU
8187 + * 1 SFP_SR
8188 + * 2 SFP_LR
8189 + */
8190 +enum ixgbe_sfp_type {
8191 + ixgbe_sfp_type_da_cu = 0,
8192 + ixgbe_sfp_type_sr = 1,
8193 + ixgbe_sfp_type_lr = 2,
8194 + ixgbe_sfp_type_unknown = 0xFFFF
8195 };
8196
8197 enum ixgbe_media_type {
8198 ixgbe_media_type_unknown = 0,
8199 ixgbe_media_type_fiber,
8200 ixgbe_media_type_copper,
8201 - ixgbe_media_type_backplane
8202 + ixgbe_media_type_backplane,
8203 + ixgbe_media_type_virtual
8204 };
8205
8206 /* Flow Control Settings */
8207 @@ -1167,6 +1250,8 @@ struct ixgbe_addr_filter_info {
8208 u32 rar_used_count;
8209 u32 mc_addr_in_rar_count;
8210 u32 mta_in_use;
8211 + u32 overflow_promisc;
8212 + bool user_set_promisc;
8213 };
8214
8215 /* Flow control parameters */
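
/*
 * Usage sketch (illustrative, not part of the patch): probing code
 * can identify the module first and bail out on the new error code
 * before consulting sfp_type; the identify_sfp hook and
 * IXGBE_ERR_SFP_NOT_SUPPORTED are declared further down in this
 * header, and setup_da_cu() is a hypothetical helper.
 */
s32 ret = hw->phy.ops.identify_sfp(hw);

if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED)
	return ret;	/* refuse to bring up an unsupported module */

if (hw->phy.sfp_type == ixgbe_sfp_type_da_cu)
	setup_da_cu(hw);	/* direct-attach copper path */
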
8216 @@ -1242,57 +1327,118 @@ struct ixgbe_hw_stats {
8217 /* forward declaration */
8218 struct ixgbe_hw;
8219
8220 +/* iterator type for walking multicast address lists */
8221 +typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr,
8222 + u32 *vmdq);
8223 +
8224 +/* Function pointer table */
8225 +struct ixgbe_eeprom_operations {
8226 + s32 (*init_params)(struct ixgbe_hw *);
8227 + s32 (*read)(struct ixgbe_hw *, u16, u16 *);
8228 + s32 (*write)(struct ixgbe_hw *, u16, u16);
8229 + s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
8230 + s32 (*update_checksum)(struct ixgbe_hw *);
8231 +};
8232 +
8233 struct ixgbe_mac_operations {
8234 - s32 (*reset)(struct ixgbe_hw *);
8235 + s32 (*init_hw)(struct ixgbe_hw *);
8236 + s32 (*reset_hw)(struct ixgbe_hw *);
8237 + s32 (*start_hw)(struct ixgbe_hw *);
8238 + s32 (*clear_hw_cntrs)(struct ixgbe_hw *);
8239 enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
8240 + s32 (*get_supported_physical_layer)(struct ixgbe_hw *);
8241 + s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
8242 + s32 (*stop_adapter)(struct ixgbe_hw *);
8243 + s32 (*get_bus_info)(struct ixgbe_hw *);
8244 + s32 (*read_analog_reg8)(struct ixgbe_hw*, u32, u8*);
8245 + s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8);
8246 +
8247 + /* Link */
8248 s32 (*setup_link)(struct ixgbe_hw *);
8249 - s32 (*check_link)(struct ixgbe_hw *, u32 *, bool *);
8250 - s32 (*setup_link_speed)(struct ixgbe_hw *, u32, bool, bool);
8251 - s32 (*get_link_settings)(struct ixgbe_hw *, u32 *, bool *);
8252 + s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool,
8253 + bool);
8254 + s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
8255 + s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
8256 + bool *);
8257 +
8258 + /* LED */
8259 + s32 (*led_on)(struct ixgbe_hw *, u32);
8260 + s32 (*led_off)(struct ixgbe_hw *, u32);
8261 + s32 (*blink_led_start)(struct ixgbe_hw *, u32);
8262 + s32 (*blink_led_stop)(struct ixgbe_hw *, u32);
8263 +
8264 + /* RAR, Multicast, VLAN */
8265 + s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32);
8266 + s32 (*clear_rar)(struct ixgbe_hw *, u32);
8267 + s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32);
8268 + s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
8269 + s32 (*init_rx_addrs)(struct ixgbe_hw *);
8270 + s32 (*update_uc_addr_list)(struct ixgbe_hw *, u8 *, u32,
8271 + ixgbe_mc_addr_itr);
8272 + s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32,
8273 + ixgbe_mc_addr_itr);
8274 + s32 (*enable_mc)(struct ixgbe_hw *);
8275 + s32 (*disable_mc)(struct ixgbe_hw *);
8276 + s32 (*clear_vfta)(struct ixgbe_hw *);
8277 + s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
8278 + s32 (*init_uta_tables)(struct ixgbe_hw *);
8279 +
8280 + /* Flow Control */
8281 + s32 (*setup_fc)(struct ixgbe_hw *, s32);
8282 };
8283
8284 struct ixgbe_phy_operations {
8285 + s32 (*identify)(struct ixgbe_hw *);
8286 + s32 (*identify_sfp)(struct ixgbe_hw *);
8287 + s32 (*reset)(struct ixgbe_hw *);
8288 + s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *);
8289 + s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16);
8290 s32 (*setup_link)(struct ixgbe_hw *);
8291 - s32 (*check_link)(struct ixgbe_hw *, u32 *, bool *);
8292 - s32 (*setup_link_speed)(struct ixgbe_hw *, u32, bool, bool);
8293 -};
8294 -
8295 -struct ixgbe_mac_info {
8296 - struct ixgbe_mac_operations ops;
8297 - enum ixgbe_mac_type type;
8298 - u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
8299 - u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
8300 - s32 mc_filter_type;
8301 - u32 num_rx_queues;
8302 - u32 num_tx_queues;
8303 - u32 num_rx_addrs;
8304 - u32 link_attach_type;
8305 - u32 link_mode_select;
8306 - bool link_settings_loaded;
8307 + s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool,
8308 + bool);
8309 + s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *);
8310 + s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8);
8311 + s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8, u8 *);
8312 + s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
8313 };
8314
8315 struct ixgbe_eeprom_info {
8316 - enum ixgbe_eeprom_type type;
8317 - u16 word_size;
8318 - u16 address_bits;
8319 + struct ixgbe_eeprom_operations ops;
8320 + enum ixgbe_eeprom_type type;
8321 + u32 semaphore_delay;
8322 + u16 word_size;
8323 + u16 address_bits;
8324 };
8325
8326 -struct ixgbe_phy_info {
8327 - struct ixgbe_phy_operations ops;
8328 -
8329 - enum ixgbe_phy_type type;
8330 - u32 addr;
8331 - u32 id;
8332 - u32 revision;
8333 - enum ixgbe_media_type media_type;
8334 - u32 autoneg_advertised;
8335 - bool autoneg_wait_to_complete;
8336 +struct ixgbe_mac_info {
8337 + struct ixgbe_mac_operations ops;
8338 + enum ixgbe_mac_type type;
8339 + u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
8340 + u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
8341 + s32 mc_filter_type;
8342 + u32 mcft_size;
8343 + u32 vft_size;
8344 + u32 num_rar_entries;
8345 + u32 max_tx_queues;
8346 + u32 max_rx_queues;
8347 + u32 link_attach_type;
8348 + u32 link_mode_select;
8349 + bool link_settings_loaded;
8350 + bool autoneg;
8351 + bool autoneg_failed;
8352 };
8353
8354 -struct ixgbe_info {
8355 - enum ixgbe_mac_type mac;
8356 - s32 (*get_invariants)(struct ixgbe_hw *);
8357 - struct ixgbe_mac_operations *mac_ops;
8358 +struct ixgbe_phy_info {
8359 + struct ixgbe_phy_operations ops;
8360 + enum ixgbe_phy_type type;
8361 + u32 addr;
8362 + u32 id;
8363 + enum ixgbe_sfp_type sfp_type;
8364 + u32 revision;
8365 + enum ixgbe_media_type media_type;
8366 + bool reset_disable;
8367 + ixgbe_autoneg_advertised autoneg_advertised;
8368 + bool autoneg_wait_to_complete;
8369 };
8370
8371 struct ixgbe_hw {
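
/*
 * Usage sketch (illustrative, not part of the patch): callers are
 * expected to dispatch through the per-MAC function pointer tables
 * rather than call 82598-specific routines directly; note that
 * check_link now takes the ixgbe_link_speed typedef and a fourth
 * wait-to-complete argument. "full_10g" is a hypothetical flag.
 */
ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
bool link_up = false;
bool full_10g = false;

hw->mac.ops.check_link(hw, &speed, &link_up, false);
if (link_up && speed == IXGBE_LINK_SPEED_10GB_FULL)
	full_10g = true;
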
8372 @@ -1311,6 +1457,15 @@ struct ixgbe_hw {
8373 bool adapter_stopped;
8374 };
8375
8376 +struct ixgbe_info {
8377 + enum ixgbe_mac_type mac;
8378 + s32 (*get_invariants)(struct ixgbe_hw *);
8379 + struct ixgbe_mac_operations *mac_ops;
8380 + struct ixgbe_eeprom_operations *eeprom_ops;
8381 + struct ixgbe_phy_operations *phy_ops;
8382 +};
8383 +
8384 +
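
/*
 * Usage sketch (illustrative, not part of the patch): with the two
 * extra ops-table pointers, a per-MAC board-info entry wires up
 * EEPROM and PHY operations alongside the MAC ops. All table and
 * function names here are hypothetical placeholders.
 */
static struct ixgbe_info ixgbe_82598_sketch_info = {
	.mac		= ixgbe_mac_82598EB,
	.get_invariants	= &ixgbe_get_invariants_82598,
	.mac_ops	= &mac_ops_82598EB,
	.eeprom_ops	= &eeprom_ops_82598EB,
	.phy_ops	= &phy_ops_82598EB,
};
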
8385 /* Error Codes */
8386 #define IXGBE_ERR_EEPROM -1
8387 #define IXGBE_ERR_EEPROM_CHECKSUM -2
8388 @@ -1329,6 +1484,8 @@ struct ixgbe_hw {
8389 #define IXGBE_ERR_RESET_FAILED -15
8390 #define IXGBE_ERR_SWFW_SYNC -16
8391 #define IXGBE_ERR_PHY_ADDR_INVALID -17
8392 +#define IXGBE_ERR_I2C -18
8393 +#define IXGBE_ERR_SFP_NOT_SUPPORTED -19
8394 #define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
8395
8396 #endif /* _IXGBE_TYPE_H_ */
8397 --- a/drivers/net/Kconfig
8398 +++ b/drivers/net/Kconfig
8399 @@ -2381,7 +2381,6 @@ config EHEA
8400 config IXGBE
8401 tristate "Intel(R) 10GbE PCI Express adapters support"
8402 depends on PCI && INET
8403 - select INET_LRO
8404 ---help---
8405 This driver supports Intel(R) 10GbE PCI Express family of
8406 adapters. For more information on how to identify your adapter, go
8407 @@ -2397,6 +2396,16 @@ config IXGBE
8408 To compile this driver as a module, choose M here. The module
8409 will be called ixgbe.
8410
8411 +config IXGBE_LRO
8412 + bool "Use software LRO"
8413 + depends on IXGBE && INET
8414 + select INET_LRO
8415 + default y
8416 + ---help---
8417 + Say Y here if you want to use large receive offload.
8418 +
8419 + If in doubt, say N.
8420 +
8421 config IXGB
8422 tristate "Intel(R) PRO/10GbE support"
8423 depends on PCI
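
/*
 * Usage sketch (illustrative, not part of the patch): splitting LRO
 * into its own bool option means the driver sources can compile the
 * software LRO manager in or out with an ordinary ifdef; the ring
 * structure name is hypothetical, while struct net_lro_mgr comes
 * from the kernel's inet_lro API.
 */
#ifdef CONFIG_IXGBE_LRO
#include <linux/inet_lro.h>
#endif

struct ixgbe_sketch_ring {
#ifdef CONFIG_IXGBE_LRO
	struct net_lro_mgr lro_mgr;	/* per-ring software LRO state */
#endif
	/* ... other ring members ... */
};
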