/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/in.h>
#include <linux/ethtool.h>
#include <linux/topology.h>
#include <linux/gfp.h>
#include <linux/aer.h>
#include <linux/interrupt.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "selftest.h"

#include "workarounds.h"

/**************************************************************************
 *
 * Type and structure definitions
 *
 **************************************************************************
 */
/* Loopback mode names (see LOOPBACK_MODE()) */
const unsigned int ef4_loopback_mode_max = LOOPBACK_MAX;
const char *const ef4_loopback_mode_names[] = {
	[LOOPBACK_NONE]		= "NONE",
	[LOOPBACK_DATA]		= "DATAPATH",
	[LOOPBACK_GMAC]		= "GMAC",
	[LOOPBACK_XGMII]	= "XGMII",
	[LOOPBACK_XGXS]		= "XGXS",
	[LOOPBACK_XAUI]		= "XAUI",
	[LOOPBACK_GMII]		= "GMII",
	[LOOPBACK_SGMII]	= "SGMII",
	[LOOPBACK_XGBR]		= "XGBR",
	[LOOPBACK_XFI]		= "XFI",
	[LOOPBACK_XAUI_FAR]	= "XAUI_FAR",
	[LOOPBACK_GMII_FAR]	= "GMII_FAR",
	[LOOPBACK_SGMII_FAR]	= "SGMII_FAR",
	[LOOPBACK_XFI_FAR]	= "XFI_FAR",
	[LOOPBACK_GPHY]		= "GPHY",
	[LOOPBACK_PHYXS]	= "PHYXS",
	[LOOPBACK_PCS]		= "PCS",
	[LOOPBACK_PMAPMD]	= "PMA/PMD",
	[LOOPBACK_XPORT]	= "XPORT",
	[LOOPBACK_XGMII_WS]	= "XGMII_WS",
	[LOOPBACK_XAUI_WS]	= "XAUI_WS",
	[LOOPBACK_XAUI_WS_FAR]	= "XAUI_WS_FAR",
	[LOOPBACK_XAUI_WS_NEAR]	= "XAUI_WS_NEAR",
	[LOOPBACK_GMII_WS]	= "GMII_WS",
	[LOOPBACK_XFI_WS]	= "XFI_WS",
	[LOOPBACK_XFI_WS_FAR]	= "XFI_WS_FAR",
	[LOOPBACK_PHYXS_WS]	= "PHYXS_WS",
};

const unsigned int ef4_reset_type_max = RESET_TYPE_MAX;
const char *const ef4_reset_type_names[] = {
	[RESET_TYPE_INVISIBLE]		= "INVISIBLE",
	[RESET_TYPE_ALL]		= "ALL",
	[RESET_TYPE_RECOVER_OR_ALL]	= "RECOVER_OR_ALL",
	[RESET_TYPE_WORLD]		= "WORLD",
	[RESET_TYPE_RECOVER_OR_DISABLE]	= "RECOVER_OR_DISABLE",
	[RESET_TYPE_DATAPATH]		= "DATAPATH",
	[RESET_TYPE_DISABLE]		= "DISABLE",
	[RESET_TYPE_TX_WATCHDOG]	= "TX_WATCHDOG",
	[RESET_TYPE_INT_ERROR]		= "INT_ERROR",
	[RESET_TYPE_RX_RECOVERY]	= "RX_RECOVERY",
	[RESET_TYPE_DMA_ERROR]		= "DMA_ERROR",
	[RESET_TYPE_TX_SKIP]		= "TX_SKIP",
};

/* Reset workqueue. If any NIC has a hardware failure then a reset will be
 * queued onto this work queue. This is not a per-nic work queue, because
 * ef4_reset_work() acquires the rtnl lock, so resets are naturally serialised.
 */
static struct workqueue_struct *reset_workqueue;

/* How often and how many times to poll for a reset while waiting for a
 * BIST that another function started to complete.
 */
#define BIST_WAIT_DELAY_MS	100
#define BIST_WAIT_DELAY_COUNT	100
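/* Illustrative sketch only (not code from this driver): a caller waiting
 * for a BIST started by another function would poll roughly like this,
 * giving up after BIST_WAIT_DELAY_COUNT * BIST_WAIT_DELAY_MS = 10 seconds
 * (bist_in_progress() is a hypothetical helper):
 *
 *	int i;
 *
 *	for (i = 0; i < BIST_WAIT_DELAY_COUNT; ++i) {
 *		if (!bist_in_progress(efx))
 *			return 0;
 *		msleep(BIST_WAIT_DELAY_MS);
 *	}
 *	return -ETIMEDOUT;
 */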
/**************************************************************************
 *
 * Configurable values
 *
 *************************************************************************/

/*
 * Use separate channels for TX and RX events
 *
 * Set this to 1 to use separate channels for TX and RX. It allows us
 * to control interrupt affinity separately for TX and RX.
 *
 * This is only used in MSI-X interrupt mode
 */
bool ef4_separate_tx_channels;
module_param(ef4_separate_tx_channels, bool, 0444);
MODULE_PARM_DESC(ef4_separate_tx_channels,
		 "Use separate channels for TX and RX");

/* This is the weight assigned to each of the (per-channel) virtual
 * NAPI devices.
 */
static int napi_weight = 64;

/* This is the time (in jiffies) between invocations of the hardware
 * monitor.
 * On Falcon-based NICs, this will:
 * - Check the on-board hardware monitor;
 * - Poll the link state and reconfigure the hardware as necessary.
 * On Siena-based NICs for power systems with EEH support, this will give EEH a
 * chance to start.
 */
static unsigned int ef4_monitor_interval = 1 * HZ;

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * The default for RX should strike a balance between increasing the
 * round-trip latency and reducing overhead.
 */
static unsigned int rx_irq_mod_usec = 60;

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * This default is chosen to ensure that a 10G link does not go idle
 * while a TX queue is stopped after it has become full. A queue is
 * restarted when it drops below half full. The time this takes (assuming
 * worst case 3 descriptors per packet and 1024 descriptors) is
 * 512 / 3 * 1.2 = 205 usec.
 */
static unsigned int tx_irq_mod_usec = 150;
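/* Working through the figure above: the queue is restarted once it is
 * half empty, i.e. after 512 of the 1024 descriptors complete. At a
 * worst case of 3 descriptors per packet that is ~170 packets, and at
 * ~1.2 us per full-size packet on a 10G link the ring drains in
 * 512 / 3 * 1.2 ~= 205 usec, so the 150 usec default fires before the
 * link can go idle. */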
/* This is the first interrupt mode to try out of:
 * 0 => MSI-X
 * 1 => MSI
 * 2 => legacy
 */
static unsigned int interrupt_mode;

/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
 * i.e. the number of CPUs among which we may distribute simultaneous
 * interrupt handling.
 *
 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
 * The default (0) means to assign an interrupt to each core.
 */
static unsigned int rss_cpus;
module_param(rss_cpus, uint, 0444);
MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");

static bool phy_flash_cfg;
module_param(phy_flash_cfg, bool, 0644);
MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");

static unsigned irq_adapt_low_thresh = 8000;
module_param(irq_adapt_low_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_low_thresh,
		 "Threshold score for reducing IRQ moderation");

static unsigned irq_adapt_high_thresh = 16000;
module_param(irq_adapt_high_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_high_thresh,
		 "Threshold score for increasing IRQ moderation");

static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
			 NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
			 NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
			 NETIF_MSG_TX_ERR | NETIF_MSG_HW);
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");

/**************************************************************************
 *
 * Utility functions and prototypes
 *
 *************************************************************************/
static int ef4_soft_enable_interrupts(struct ef4_nic *efx);
static void ef4_soft_disable_interrupts(struct ef4_nic *efx);
static void ef4_remove_channel(struct ef4_channel *channel);
static void ef4_remove_channels(struct ef4_nic *efx);
static const struct ef4_channel_type ef4_default_channel_type;
static void ef4_remove_port(struct ef4_nic *efx);
static void ef4_init_napi_channel(struct ef4_channel *channel);
static void ef4_fini_napi(struct ef4_nic *efx);
static void ef4_fini_napi_channel(struct ef4_channel *channel);
static void ef4_fini_struct(struct ef4_nic *efx);
static void ef4_start_all(struct ef4_nic *efx);
static void ef4_stop_all(struct ef4_nic *efx);

#define EF4_ASSERT_RESET_SERIALISED(efx)		\
	do {						\
		if ((efx->state == STATE_READY) ||	\
		    (efx->state == STATE_RECOVERY) ||	\
		    (efx->state == STATE_DISABLED))	\
			ASSERT_RTNL();			\
	} while (0)
static int ef4_check_disabled(struct ef4_nic *efx)
{
	if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {
		netif_err(efx, drv, efx->net_dev,
			  "device is disabled due to earlier errors\n");
		return -EIO;
	}
	return 0;
}

/**************************************************************************
 *
 * Event queue processing
 *
 *************************************************************************/
/* Process channel's event queue
 *
 * This function is responsible for processing the event queue of a
 * single channel. The caller must guarantee that this function will
 * never be concurrently called more than once on the same channel,
 * though different channels may be being processed concurrently.
 */
static int ef4_process_channel(struct ef4_channel *channel, int budget)
{
	struct ef4_tx_queue *tx_queue;
	int spent;

	if (unlikely(!channel->enabled))
		return 0;

	ef4_for_each_channel_tx_queue(tx_queue, channel) {
		tx_queue->pkts_compl = 0;
		tx_queue->bytes_compl = 0;
	}

	spent = ef4_nic_process_eventq(channel, budget);
	if (spent && ef4_channel_has_rx_queue(channel)) {
		struct ef4_rx_queue *rx_queue =
			ef4_channel_get_rx_queue(channel);

		ef4_rx_flush_packet(channel);
		ef4_fast_push_rx_descriptors(rx_queue, true);
	}

	/* Update BQL */
	ef4_for_each_channel_tx_queue(tx_queue, channel) {
		if (tx_queue->bytes_compl) {
			netdev_tx_completed_queue(tx_queue->core_txq,
				tx_queue->pkts_compl, tx_queue->bytes_compl);
		}
	}

	return spent;
}

/* NAPI poll handler
 *
 * NAPI guarantees serialisation of polls of the same device, which
 * provides the guarantee required by ef4_process_channel().
 */
static void ef4_update_irq_mod(struct ef4_nic *efx, struct ef4_channel *channel)
{
	int step = efx->irq_mod_step_us;

	if (channel->irq_mod_score < irq_adapt_low_thresh) {
		if (channel->irq_moderation_us > step) {
			channel->irq_moderation_us -= step;
			efx->type->push_irq_moderation(channel);
		}
	} else if (channel->irq_mod_score > irq_adapt_high_thresh) {
		if (channel->irq_moderation_us <
		    efx->irq_rx_moderation_us) {
			channel->irq_moderation_us += step;
			efx->type->push_irq_moderation(channel);
		}
	}

	channel->irq_count = 0;
	channel->irq_mod_score = 0;
}

static int ef4_poll(struct napi_struct *napi, int budget)
{
	struct ef4_channel *channel =
		container_of(napi, struct ef4_channel, napi_str);
	struct ef4_nic *efx = channel->efx;
	int spent;

	netif_vdbg(efx, intr, efx->net_dev,
		   "channel %d NAPI poll executing on CPU %d\n",
		   channel->channel, raw_smp_processor_id());

	spent = ef4_process_channel(channel, budget);

	if (spent < budget) {
		if (ef4_channel_has_rx_queue(channel) &&
		    efx->irq_rx_adaptive &&
		    unlikely(++channel->irq_count == 1000)) {
			ef4_update_irq_mod(efx, channel);
		}

		ef4_filter_rfs_expire(channel);

		/* There is no race here; although napi_disable() will
		 * only wait for napi_complete(), this isn't a problem
		 * since ef4_nic_eventq_read_ack() will have no effect if
		 * interrupts have already been disabled.
		 */
		napi_complete_done(napi, spent);
		ef4_nic_eventq_read_ack(channel);
	}

	return spent;
}
/* Create event queue
 * Event queue memory allocations are done only once. If the channel
 * is reset, the memory buffer will be reused; this guards against
 * errors during channel reset and also simplifies interrupt handling.
 */
static int ef4_probe_eventq(struct ef4_channel *channel)
{
	struct ef4_nic *efx = channel->efx;
	unsigned long entries;

	netif_dbg(efx, probe, efx->net_dev,
		  "chan %d create event queue\n", channel->channel);

	/* Build an event queue with room for one event per tx and rx buffer,
	 * plus some extra for link state events and MCDI completions. */
	entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
	EF4_BUG_ON_PARANOID(entries > EF4_MAX_EVQ_SIZE);
	channel->eventq_mask = max(entries, EF4_MIN_EVQ_SIZE) - 1;

	return ef4_nic_probe_eventq(channel);
}

/* Prepare channel's event queue */
static int ef4_init_eventq(struct ef4_channel *channel)
{
	struct ef4_nic *efx = channel->efx;
	int rc;

	EF4_WARN_ON_PARANOID(channel->eventq_init);

	netif_dbg(efx, drv, efx->net_dev,
		  "chan %d init event queue\n", channel->channel);

	rc = ef4_nic_init_eventq(channel);
	if (rc == 0) {
		efx->type->push_irq_moderation(channel);
		channel->eventq_read_ptr = 0;
		channel->eventq_init = true;
	}
	return rc;
}

/* Enable event queue processing and NAPI */
void ef4_start_eventq(struct ef4_channel *channel)
{
	netif_dbg(channel->efx, ifup, channel->efx->net_dev,
		  "chan %d start event queue\n", channel->channel);

	/* Make sure the NAPI handler sees the enabled flag set */
	channel->enabled = true;
	smp_wmb();

	napi_enable(&channel->napi_str);
	ef4_nic_eventq_read_ack(channel);
}

/* Disable event queue processing and NAPI */
void ef4_stop_eventq(struct ef4_channel *channel)
{
	if (!channel->enabled)
		return;

	napi_disable(&channel->napi_str);
	channel->enabled = false;
}

static void ef4_fini_eventq(struct ef4_channel *channel)
{
	if (!channel->eventq_init)
		return;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d fini event queue\n", channel->channel);

	ef4_nic_fini_eventq(channel);
	channel->eventq_init = false;
}

static void ef4_remove_eventq(struct ef4_channel *channel)
{
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d remove event queue\n", channel->channel);

	ef4_nic_remove_eventq(channel);
}
/**************************************************************************
 *
 * Channel handling
 *
 *************************************************************************/

/* Allocate and initialise a channel structure. */
static struct ef4_channel *
ef4_alloc_channel(struct ef4_nic *efx, int i, struct ef4_channel *old_channel)
{
	struct ef4_channel *channel;
	struct ef4_rx_queue *rx_queue;
	struct ef4_tx_queue *tx_queue;
	int j;

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	channel->efx = efx;
	channel->channel = i;
	channel->type = &ef4_default_channel_type;

	for (j = 0; j < EF4_TXQ_TYPES; j++) {
		tx_queue = &channel->tx_queue[j];
		tx_queue->efx = efx;
		tx_queue->queue = i * EF4_TXQ_TYPES + j;
		tx_queue->channel = channel;
	}

	rx_queue = &channel->rx_queue;
	rx_queue->efx = efx;
	setup_timer(&rx_queue->slow_fill, ef4_rx_slow_fill,
		    (unsigned long)rx_queue);

	return channel;
}

/* Allocate and initialise a channel structure, copying parameters
 * (but not resources) from an old channel structure.
 */
static struct ef4_channel *
ef4_copy_channel(const struct ef4_channel *old_channel)
{
	struct ef4_channel *channel;
	struct ef4_rx_queue *rx_queue;
	struct ef4_tx_queue *tx_queue;
	int j;

	channel = kmalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	*channel = *old_channel;

	channel->napi_dev = NULL;
	INIT_HLIST_NODE(&channel->napi_str.napi_hash_node);
	channel->napi_str.napi_id = 0;
	channel->napi_str.state = 0;
	memset(&channel->eventq, 0, sizeof(channel->eventq));

	for (j = 0; j < EF4_TXQ_TYPES; j++) {
		tx_queue = &channel->tx_queue[j];
		if (tx_queue->channel)
			tx_queue->channel = channel;
		tx_queue->buffer = NULL;
		memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
	}

	rx_queue = &channel->rx_queue;
	rx_queue->buffer = NULL;
	memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
	setup_timer(&rx_queue->slow_fill, ef4_rx_slow_fill,
		    (unsigned long)rx_queue);

	return channel;
}
static int ef4_probe_channel(struct ef4_channel *channel)
{
	struct ef4_tx_queue *tx_queue;
	struct ef4_rx_queue *rx_queue;
	int rc;

	netif_dbg(channel->efx, probe, channel->efx->net_dev,
		  "creating channel %d\n", channel->channel);

	rc = channel->type->pre_probe(channel);
	if (rc)
		goto fail;

	rc = ef4_probe_eventq(channel);
	if (rc)
		goto fail;

	ef4_for_each_channel_tx_queue(tx_queue, channel) {
		rc = ef4_probe_tx_queue(tx_queue);
		if (rc)
			goto fail;
	}

	ef4_for_each_channel_rx_queue(rx_queue, channel) {
		rc = ef4_probe_rx_queue(rx_queue);
		if (rc)
			goto fail;
	}

	return 0;

fail:
	ef4_remove_channel(channel);
	return rc;
}

static void
ef4_get_channel_name(struct ef4_channel *channel, char *buf, size_t len)
{
	struct ef4_nic *efx = channel->efx;
	const char *type;
	int number;

	number = channel->channel;
	if (efx->tx_channel_offset == 0) {
		type = "";
	} else if (channel->channel < efx->tx_channel_offset) {
		type = "-rx";
	} else {
		type = "-tx";
		number -= efx->tx_channel_offset;
	}
	snprintf(buf, len, "%s%s-%d", efx->name, type, number);
}

static void ef4_set_channel_names(struct ef4_nic *efx)
{
	struct ef4_channel *channel;

	ef4_for_each_channel(channel, efx)
		channel->type->get_name(channel,
					efx->msi_context[channel->channel].name,
					sizeof(efx->msi_context[0].name));
}

static int ef4_probe_channels(struct ef4_nic *efx)
{
	struct ef4_channel *channel;
	int rc;

	/* Restart special buffer allocation */
	efx->next_buffer_table = 0;

	/* Probe channels in reverse, so that any 'extra' channels
	 * use the start of the buffer table. This allows the traffic
	 * channels to be resized without moving them or wasting the
	 * entries before them.
	 */
	ef4_for_each_channel_rev(channel, efx) {
		rc = ef4_probe_channel(channel);
		if (rc) {
			netif_err(efx, probe, efx->net_dev,
				  "failed to create channel %d\n",
				  channel->channel);
			goto fail;
		}
	}
	ef4_set_channel_names(efx);

	return 0;

fail:
	ef4_remove_channels(efx);
	return rc;
}
/* Channels are shutdown and reinitialised whilst the NIC is running
 * to propagate configuration changes (mtu, checksum offload), or
 * to clear hardware error conditions
 */
static void ef4_start_datapath(struct ef4_nic *efx)
{
	netdev_features_t old_features = efx->net_dev->features;
	bool old_rx_scatter = efx->rx_scatter;
	struct ef4_tx_queue *tx_queue;
	struct ef4_rx_queue *rx_queue;
	struct ef4_channel *channel;
	size_t rx_buf_len;

	/* Calculate the rx buffer allocation parameters required to
	 * support the current MTU, including padding for header
	 * alignment and overruns.
	 */
	efx->rx_dma_len = (efx->rx_prefix_size +
			   EF4_MAX_FRAME_LEN(efx->net_dev->mtu) +
			   efx->type->rx_buffer_padding);
	rx_buf_len = (sizeof(struct ef4_rx_page_state) +
		      efx->rx_ip_align + efx->rx_dma_len);
	if (rx_buf_len <= PAGE_SIZE) {
		efx->rx_scatter = efx->type->always_rx_scatter;
		efx->rx_buffer_order = 0;
	} else if (efx->type->can_rx_scatter) {
		BUILD_BUG_ON(EF4_RX_USR_BUF_SIZE % L1_CACHE_BYTES);
		BUILD_BUG_ON(sizeof(struct ef4_rx_page_state) +
			     2 * ALIGN(NET_IP_ALIGN + EF4_RX_USR_BUF_SIZE,
				       EF4_RX_BUF_ALIGNMENT) >
			     PAGE_SIZE);
		efx->rx_scatter = true;
		efx->rx_dma_len = EF4_RX_USR_BUF_SIZE;
		efx->rx_buffer_order = 0;
	} else {
		efx->rx_scatter = false;
		efx->rx_buffer_order = get_order(rx_buf_len);
	}

	ef4_rx_config_page_split(efx);
	if (efx->rx_buffer_order)
		netif_dbg(efx, drv, efx->net_dev,
			  "RX buf len=%u; page order=%u batch=%u\n",
			  efx->rx_dma_len, efx->rx_buffer_order,
			  efx->rx_pages_per_batch);
	else
		netif_dbg(efx, drv, efx->net_dev,
			  "RX buf len=%u step=%u bpp=%u; page batch=%u\n",
			  efx->rx_dma_len, efx->rx_page_buf_step,
			  efx->rx_bufs_per_page, efx->rx_pages_per_batch);

	/* Restore previously fixed features in hw_features and remove
	 * features which are fixed now
	 */
	efx->net_dev->hw_features |= efx->net_dev->features;
	efx->net_dev->hw_features &= ~efx->fixed_features;
	efx->net_dev->features |= efx->fixed_features;
	if (efx->net_dev->features != old_features)
		netdev_features_change(efx->net_dev);

	/* RX filters may also have scatter-enabled flags */
	if (efx->rx_scatter != old_rx_scatter)
		efx->type->filter_update_rx_scatter(efx);

	/* We must keep at least one descriptor in a TX ring empty.
	 * We could avoid this when the queue size does not exactly
	 * match the hardware ring size, but it's not that important.
	 * Therefore we stop the queue when one more skb might fill
	 * the ring completely. We wake it when half way back to
	 * empty.
	 */
	efx->txq_stop_thresh = efx->txq_entries - ef4_tx_max_skb_descs(efx);
	efx->txq_wake_thresh = efx->txq_stop_thresh / 2;

	/* Initialise the channels */
	ef4_for_each_channel(channel, efx) {
		ef4_for_each_channel_tx_queue(tx_queue, channel) {
			ef4_init_tx_queue(tx_queue);
			atomic_inc(&efx->active_queues);
		}

		ef4_for_each_channel_rx_queue(rx_queue, channel) {
			ef4_init_rx_queue(rx_queue);
			atomic_inc(&efx->active_queues);
			ef4_stop_eventq(channel);
			ef4_fast_push_rx_descriptors(rx_queue, false);
			ef4_start_eventq(channel);
		}

		WARN_ON(channel->rx_pkt_n_frags);
	}

	if (netif_device_present(efx->net_dev))
		netif_tx_wake_all_queues(efx->net_dev);
}
static void ef4_stop_datapath(struct ef4_nic *efx)
{
	struct ef4_channel *channel;
	struct ef4_tx_queue *tx_queue;
	struct ef4_rx_queue *rx_queue;
	int rc;

	EF4_ASSERT_RESET_SERIALISED(efx);
	BUG_ON(efx->port_enabled);

	/* Stop RX refill */
	ef4_for_each_channel(channel, efx) {
		ef4_for_each_channel_rx_queue(rx_queue, channel)
			rx_queue->refill_enabled = false;
	}

	ef4_for_each_channel(channel, efx) {
		/* RX packet processing is pipelined, so wait for the
		 * NAPI handler to complete. At least event queue 0
		 * might be kept active by non-data events, so don't
		 * use napi_synchronize() but actually disable NAPI
		 * temporarily.
		 */
		if (ef4_channel_has_rx_queue(channel)) {
			ef4_stop_eventq(channel);
			ef4_start_eventq(channel);
		}
	}

	rc = efx->type->fini_dmaq(efx);
	if (rc && EF4_WORKAROUND_7803(efx)) {
		/* Schedule a reset to recover from the flush failure. The
		 * descriptor caches reference memory we're about to free,
		 * but falcon_reconfigure_mac_wrapper() won't reconnect
		 * the MACs because of the pending reset.
		 */
		netif_err(efx, drv, efx->net_dev,
			  "Resetting to recover from flush failure\n");
		ef4_schedule_reset(efx, RESET_TYPE_ALL);
	} else if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
	} else {
		netif_dbg(efx, drv, efx->net_dev,
			  "successfully flushed all queues\n");
	}

	ef4_for_each_channel(channel, efx) {
		ef4_for_each_channel_rx_queue(rx_queue, channel)
			ef4_fini_rx_queue(rx_queue);
		ef4_for_each_possible_channel_tx_queue(tx_queue, channel)
			ef4_fini_tx_queue(tx_queue);
	}
}

static void ef4_remove_channel(struct ef4_channel *channel)
{
	struct ef4_tx_queue *tx_queue;
	struct ef4_rx_queue *rx_queue;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "destroy chan %d\n", channel->channel);

	ef4_for_each_channel_rx_queue(rx_queue, channel)
		ef4_remove_rx_queue(rx_queue);
	ef4_for_each_possible_channel_tx_queue(tx_queue, channel)
		ef4_remove_tx_queue(tx_queue);
	ef4_remove_eventq(channel);
	channel->type->post_remove(channel);
}

static void ef4_remove_channels(struct ef4_nic *efx)
{
	struct ef4_channel *channel;

	ef4_for_each_channel(channel, efx)
		ef4_remove_channel(channel);
}
int
ef4_realloc_channels(struct ef4_nic *efx, u32 rxq_entries, u32 txq_entries)
{
	struct ef4_channel *other_channel[EF4_MAX_CHANNELS], *channel;
	u32 old_rxq_entries, old_txq_entries;
	unsigned i, next_buffer_table = 0;
	int rc, rc2;

	rc = ef4_check_disabled(efx);
	if (rc)
		return rc;

	/* Not all channels should be reallocated. We must avoid
	 * reallocating their buffer table entries.
	 */
	ef4_for_each_channel(channel, efx) {
		struct ef4_rx_queue *rx_queue;
		struct ef4_tx_queue *tx_queue;

		if (channel->type->copy)
			continue;
		next_buffer_table = max(next_buffer_table,
					channel->eventq.index +
					channel->eventq.entries);
		ef4_for_each_channel_rx_queue(rx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						rx_queue->rxd.index +
						rx_queue->rxd.entries);
		ef4_for_each_channel_tx_queue(tx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						tx_queue->txd.index +
						tx_queue->txd.entries);
	}

	ef4_device_detach_sync(efx);
	ef4_stop_all(efx);
	ef4_soft_disable_interrupts(efx);

	/* Clone channels (where possible) */
	memset(other_channel, 0, sizeof(other_channel));
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (channel->type->copy)
			channel = channel->type->copy(channel);
		if (!channel) {
			rc = -ENOMEM;
			goto out;
		}
		other_channel[i] = channel;
	}

	/* Swap entry counts and channel pointers */
	old_rxq_entries = efx->rxq_entries;
	old_txq_entries = efx->txq_entries;
	efx->rxq_entries = rxq_entries;
	efx->txq_entries = txq_entries;
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		efx->channel[i] = other_channel[i];
		other_channel[i] = channel;
	}

	/* Restart buffer table allocation */
	efx->next_buffer_table = next_buffer_table;

	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (!channel->type->copy)
			continue;
		rc = ef4_probe_channel(channel);
		if (rc)
			goto rollback;
		ef4_init_napi_channel(efx->channel[i]);
	}

out:
	/* Destroy unused channel structures */
	for (i = 0; i < efx->n_channels; i++) {
		channel = other_channel[i];
		if (channel && channel->type->copy) {
			ef4_fini_napi_channel(channel);
			ef4_remove_channel(channel);
			kfree(channel);
		}
	}

	rc2 = ef4_soft_enable_interrupts(efx);
	if (rc2) {
		rc = rc ? rc : rc2;
		netif_err(efx, drv, efx->net_dev,
			  "unable to restart interrupts on channel reallocation\n");
		ef4_schedule_reset(efx, RESET_TYPE_DISABLE);
	} else {
		ef4_start_all(efx);
		netif_device_attach(efx->net_dev);
	}
	return rc;

rollback:
	/* Swap back */
	efx->rxq_entries = old_rxq_entries;
	efx->txq_entries = old_txq_entries;
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		efx->channel[i] = other_channel[i];
		other_channel[i] = channel;
	}
	goto out;
}

void ef4_schedule_slow_fill(struct ef4_rx_queue *rx_queue)
{
	mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
}
static const struct ef4_channel_type ef4_default_channel_type = {
	.pre_probe		= ef4_channel_dummy_op_int,
	.post_remove		= ef4_channel_dummy_op_void,
	.get_name		= ef4_get_channel_name,
	.copy			= ef4_copy_channel,
	.keep_eventq		= false,
};

int ef4_channel_dummy_op_int(struct ef4_channel *channel)
{
	return 0;
}

void ef4_channel_dummy_op_void(struct ef4_channel *channel)
{
}

/**************************************************************************
 *
 * Port handling
 *
 *************************************************************************/

/* This ensures that the kernel is kept informed (via
 * netif_carrier_on/off) of the link status, and also keeps the port's
 * TX queue stopped or started to match the link state.
 */
void ef4_link_status_changed(struct ef4_nic *efx)
{
	struct ef4_link_state *link_state = &efx->link_state;

	/* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
	 * that no events are triggered between unregister_netdev() and the
	 * driver unloading. A more general condition is that NETDEV_CHANGE
	 * can only be generated between NETDEV_UP and NETDEV_DOWN */
	if (!netif_running(efx->net_dev))
		return;

	if (link_state->up != netif_carrier_ok(efx->net_dev)) {
		efx->n_link_state_changes++;

		if (link_state->up)
			netif_carrier_on(efx->net_dev);
		else
			netif_carrier_off(efx->net_dev);
	}

	/* Status message for kernel log */
	if (link_state->up)
		netif_info(efx, link, efx->net_dev,
			   "link up at %uMbps %s-duplex (MTU %d)\n",
			   link_state->speed, link_state->fd ? "full" : "half",
			   efx->net_dev->mtu);
	else
		netif_info(efx, link, efx->net_dev, "link down\n");
}

void ef4_link_set_advertising(struct ef4_nic *efx, u32 advertising)
{
	efx->link_advertising = advertising;
	if (advertising) {
		if (advertising & ADVERTISED_Pause)
			efx->wanted_fc |= (EF4_FC_TX | EF4_FC_RX);
		else
			efx->wanted_fc &= ~(EF4_FC_TX | EF4_FC_RX);
		if (advertising & ADVERTISED_Asym_Pause)
			efx->wanted_fc ^= EF4_FC_TX;
	}
}

void ef4_link_set_wanted_fc(struct ef4_nic *efx, u8 wanted_fc)
{
	efx->wanted_fc = wanted_fc;
	if (efx->link_advertising) {
		if (wanted_fc & EF4_FC_RX)
			efx->link_advertising |= (ADVERTISED_Pause |
						  ADVERTISED_Asym_Pause);
		else
			efx->link_advertising &= ~(ADVERTISED_Pause |
						   ADVERTISED_Asym_Pause);
		if (wanted_fc & EF4_FC_TX)
			efx->link_advertising ^= ADVERTISED_Asym_Pause;
	}
}

static void ef4_fini_port(struct ef4_nic *efx);

/* We assume that efx->type->reconfigure_mac will always try to sync RX
 * filters and therefore needs to read-lock the filter table against freeing
 */
void ef4_mac_reconfigure(struct ef4_nic *efx)
{
	down_read(&efx->filter_sem);
	efx->type->reconfigure_mac(efx);
	up_read(&efx->filter_sem);
}
/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
 * the MAC appropriately. All other PHY configuration changes are pushed
 * through phy_op->set_link_ksettings(), and pushed asynchronously to the MAC
 * through ef4_monitor().
 *
 * Callers must hold the mac_lock
 */
int __ef4_reconfigure_port(struct ef4_nic *efx)
{
	enum ef4_phy_mode phy_mode;
	int rc;

	WARN_ON(!mutex_is_locked(&efx->mac_lock));

	/* Disable PHY transmit in mac level loopbacks */
	phy_mode = efx->phy_mode;
	if (LOOPBACK_INTERNAL(efx))
		efx->phy_mode |= PHY_MODE_TX_DISABLED;
	else
		efx->phy_mode &= ~PHY_MODE_TX_DISABLED;

	rc = efx->type->reconfigure_port(efx);

	if (rc)
		efx->phy_mode = phy_mode;

	return rc;
}

/* Reinitialise the MAC to pick up new PHY settings, even if the port is
 * disabled.
 */
int ef4_reconfigure_port(struct ef4_nic *efx)
{
	int rc;

	EF4_ASSERT_RESET_SERIALISED(efx);

	mutex_lock(&efx->mac_lock);
	rc = __ef4_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);

	return rc;
}

/* Asynchronous work item for changing MAC promiscuity and multicast
 * hash. Avoid a drain/rx_ingress enable by reconfiguring the current
 * MAC directly.
 */
static void ef4_mac_work(struct work_struct *data)
{
	struct ef4_nic *efx = container_of(data, struct ef4_nic, mac_work);

	mutex_lock(&efx->mac_lock);
	if (efx->port_enabled)
		ef4_mac_reconfigure(efx);
	mutex_unlock(&efx->mac_lock);
}

static int ef4_probe_port(struct ef4_nic *efx)
{
	int rc;

	netif_dbg(efx, probe, efx->net_dev, "create port\n");

	if (phy_flash_cfg)
		efx->phy_mode = PHY_MODE_SPECIAL;

	/* Connect up MAC/PHY operations table */
	rc = efx->type->probe_port(efx);
	if (rc)
		return rc;

	/* Initialise MAC address to permanent address */
	ether_addr_copy(efx->net_dev->dev_addr, efx->net_dev->perm_addr);

	return 0;
}

static int ef4_init_port(struct ef4_nic *efx)
{
	int rc;

	netif_dbg(efx, drv, efx->net_dev, "init port\n");

	mutex_lock(&efx->mac_lock);

	rc = efx->phy_op->init(efx);
	if (rc)
		goto fail1;

	efx->port_initialized = true;

	/* Reconfigure the MAC before creating dma queues (required for
	 * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */
	ef4_mac_reconfigure(efx);

	/* Ensure the PHY advertises the correct flow control settings */
	rc = efx->phy_op->reconfigure(efx);
	if (rc && rc != -EPERM)
		goto fail2;

	mutex_unlock(&efx->mac_lock);
	return 0;

fail2:
	efx->phy_op->fini(efx);
fail1:
	mutex_unlock(&efx->mac_lock);
	return rc;
}

static void ef4_start_port(struct ef4_nic *efx)
{
	netif_dbg(efx, ifup, efx->net_dev, "start port\n");
	BUG_ON(efx->port_enabled);

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = true;

	/* Ensure MAC ingress/egress is enabled */
	ef4_mac_reconfigure(efx);

	mutex_unlock(&efx->mac_lock);
}
/* Cancel work for MAC reconfiguration, periodic hardware monitoring
 * and the async self-test, wait for them to finish and prevent them
 * being scheduled again. This doesn't cover online resets, which
 * should only be cancelled when removing the device.
 */
static void ef4_stop_port(struct ef4_nic *efx)
{
	netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");

	EF4_ASSERT_RESET_SERIALISED(efx);

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = false;
	mutex_unlock(&efx->mac_lock);

	/* Serialise against ef4_set_multicast_list() */
	netif_addr_lock_bh(efx->net_dev);
	netif_addr_unlock_bh(efx->net_dev);

	cancel_delayed_work_sync(&efx->monitor_work);
	ef4_selftest_async_cancel(efx);
	cancel_work_sync(&efx->mac_work);
}

static void ef4_fini_port(struct ef4_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "shut down port\n");

	if (!efx->port_initialized)
		return;

	efx->phy_op->fini(efx);
	efx->port_initialized = false;

	efx->link_state.up = false;
	ef4_link_status_changed(efx);
}

static void ef4_remove_port(struct ef4_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "destroying port\n");

	efx->type->remove_port(efx);
}
/**************************************************************************
 *
 * NIC handling
 *
 *************************************************************************/

static LIST_HEAD(ef4_primary_list);
static LIST_HEAD(ef4_unassociated_list);

static bool ef4_same_controller(struct ef4_nic *left, struct ef4_nic *right)
{
	return left->type == right->type &&
		left->vpd_sn && right->vpd_sn &&
		!strcmp(left->vpd_sn, right->vpd_sn);
}

static void ef4_associate(struct ef4_nic *efx)
{
	struct ef4_nic *other, *next;

	if (efx->primary == efx) {
		/* Adding primary function; look for secondaries */

		netif_dbg(efx, probe, efx->net_dev, "adding to primary list\n");
		list_add_tail(&efx->node, &ef4_primary_list);

		list_for_each_entry_safe(other, next, &ef4_unassociated_list,
					 node) {
			if (ef4_same_controller(efx, other)) {
				list_del(&other->node);
				netif_dbg(other, probe, other->net_dev,
					  "moving to secondary list of %s %s\n",
					  pci_name(efx->pci_dev),
					  efx->net_dev->name);
				list_add_tail(&other->node,
					      &efx->secondary_list);
				other->primary = efx;
			}
		}
	} else {
		/* Adding secondary function; look for primary */

		list_for_each_entry(other, &ef4_primary_list, node) {
			if (ef4_same_controller(efx, other)) {
				netif_dbg(efx, probe, efx->net_dev,
					  "adding to secondary list of %s %s\n",
					  pci_name(other->pci_dev),
					  other->net_dev->name);
				list_add_tail(&efx->node,
					      &other->secondary_list);
				efx->primary = other;
				return;
			}
		}

		netif_dbg(efx, probe, efx->net_dev,
			  "adding to unassociated list\n");
		list_add_tail(&efx->node, &ef4_unassociated_list);
	}
}

static void ef4_dissociate(struct ef4_nic *efx)
{
	struct ef4_nic *other, *next;

	list_del(&efx->node);
	efx->primary = NULL;

	list_for_each_entry_safe(other, next, &efx->secondary_list, node) {
		list_del(&other->node);
		netif_dbg(other, probe, other->net_dev,
			  "moving to unassociated list\n");
		list_add_tail(&other->node, &ef4_unassociated_list);
		other->primary = NULL;
	}
}
/* This configures the PCI device to enable I/O and DMA. */
static int ef4_init_io(struct ef4_nic *efx)
{
	struct pci_dev *pci_dev = efx->pci_dev;
	dma_addr_t dma_mask = efx->type->max_dma_mask;
	unsigned int mem_map_size = efx->type->mem_map_size(efx);
	int rc, bar;

	netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");

	bar = efx->type->mem_bar;

	rc = pci_enable_device(pci_dev);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to enable PCI device\n");
		goto fail1;
	}

	pci_set_master(pci_dev);

	/* Set the PCI DMA mask. Try all possibilities from our
	 * genuine mask down to 32 bits, because some architectures
	 * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
	 * masks even though they reject 46 bit masks.
	 */
	while (dma_mask > 0x7fffffffUL) {
		rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask);
		if (rc == 0)
			break;
		dma_mask >>= 1;
	}
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "could not find a suitable DMA mask\n");
		goto fail2;
	}
	netif_dbg(efx, probe, efx->net_dev,
		  "using DMA mask %llx\n", (unsigned long long) dma_mask);

	efx->membase_phys = pci_resource_start(efx->pci_dev, bar);
	rc = pci_request_region(pci_dev, bar, "sfc");
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "request for memory BAR failed\n");
		rc = -EIO;
		goto fail3;
	}
	efx->membase = ioremap_nocache(efx->membase_phys, mem_map_size);
	if (!efx->membase) {
		netif_err(efx, probe, efx->net_dev,
			  "could not map memory BAR at %llx+%x\n",
			  (unsigned long long)efx->membase_phys, mem_map_size);
		rc = -ENOMEM;
		goto fail4;
	}
	netif_dbg(efx, probe, efx->net_dev,
		  "memory BAR at %llx+%x (virtual %p)\n",
		  (unsigned long long)efx->membase_phys, mem_map_size,
		  efx->membase);

	return 0;

 fail4:
	pci_release_region(efx->pci_dev, bar);
 fail3:
	efx->membase_phys = 0;
 fail2:
	pci_disable_device(efx->pci_dev);
 fail1:
	return rc;
}

static void ef4_fini_io(struct ef4_nic *efx)
{
	int bar;

	netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");

	if (efx->membase) {
		iounmap(efx->membase);
		efx->membase = NULL;
	}

	if (efx->membase_phys) {
		bar = efx->type->mem_bar;
		pci_release_region(efx->pci_dev, bar);
		efx->membase_phys = 0;
	}

	/* Don't disable bus-mastering if VFs are assigned */
	if (!pci_vfs_assigned(efx->pci_dev))
		pci_disable_device(efx->pci_dev);
}
void ef4_set_default_rx_indir_table(struct ef4_nic *efx)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
		efx->rx_indir_table[i] =
			ethtool_rxfh_indir_default(i, efx->rss_spread);
}
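/* For example (illustrative only): with rss_spread == 4,
 * ethtool_rxfh_indir_default(i, 4) is simply i % 4, so the indirection
 * table cycles 0, 1, 2, 3, 0, 1, ... and spreads flows evenly over the
 * first four RX channels. */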
static unsigned int ef4_wanted_parallelism(struct ef4_nic *efx)
{
	cpumask_var_t thread_mask;
	unsigned int count;
	int cpu;

	if (rss_cpus) {
		count = rss_cpus;
	} else {
		if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
			netif_warn(efx, probe, efx->net_dev,
				   "RSS disabled due to allocation failure\n");
			return 1;
		}

		count = 0;
		for_each_online_cpu(cpu) {
			if (!cpumask_test_cpu(cpu, thread_mask)) {
				++count;
				cpumask_or(thread_mask, thread_mask,
					   topology_sibling_cpumask(cpu));
			}
		}

		free_cpumask_var(thread_mask);
	}

	return count;
}
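/* Example (illustrative only): on a host with 8 online cores, each with
 * two hyperthread siblings, the loop above counts one CPU per sibling
 * group and returns 8, so the default is one RSS channel per physical
 * core rather than one per logical CPU. */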
/* Probe the number and type of interrupts we are able to obtain, and
 * the resulting numbers of channels and RX queues.
 */
static int ef4_probe_interrupts(struct ef4_nic *efx)
{
	unsigned int extra_channels = 0;
	unsigned int i, j;
	int rc;

	for (i = 0; i < EF4_MAX_EXTRA_CHANNELS; i++)
		if (efx->extra_channel_type[i])
			++extra_channels;

	if (efx->interrupt_mode == EF4_INT_MODE_MSIX) {
		struct msix_entry xentries[EF4_MAX_CHANNELS];
		unsigned int n_channels;

		n_channels = ef4_wanted_parallelism(efx);
		if (ef4_separate_tx_channels)
			n_channels *= 2;
		n_channels += extra_channels;
		n_channels = min(n_channels, efx->max_channels);

		for (i = 0; i < n_channels; i++)
			xentries[i].entry = i;
		rc = pci_enable_msix_range(efx->pci_dev,
					   xentries, 1, n_channels);
		if (rc < 0) {
			/* Fall back to single channel MSI */
			efx->interrupt_mode = EF4_INT_MODE_MSI;
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI-X\n");
		} else if (rc < n_channels) {
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Insufficient MSI-X vectors"
				  " available (%d < %u).\n", rc, n_channels);
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Performance may be reduced.\n");
			n_channels = rc;
		}

		if (rc > 0) {
			efx->n_channels = n_channels;
			if (n_channels > extra_channels)
				n_channels -= extra_channels;
			if (ef4_separate_tx_channels) {
				efx->n_tx_channels = min(max(n_channels / 2,
							     1U),
							 efx->max_tx_channels);
				efx->n_rx_channels = max(n_channels -
							 efx->n_tx_channels,
							 1U);
			} else {
				efx->n_tx_channels = min(n_channels,
							 efx->max_tx_channels);
				efx->n_rx_channels = n_channels;
			}
			for (i = 0; i < efx->n_channels; i++)
				ef4_get_channel(efx, i)->irq =
					xentries[i].vector;
		}
	}

	/* Try single interrupt MSI */
	if (efx->interrupt_mode == EF4_INT_MODE_MSI) {
		efx->n_channels = 1;
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		rc = pci_enable_msi(efx->pci_dev);
		if (rc == 0) {
			ef4_get_channel(efx, 0)->irq = efx->pci_dev->irq;
		} else {
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI\n");
			efx->interrupt_mode = EF4_INT_MODE_LEGACY;
		}
	}

	/* Assume legacy interrupts */
	if (efx->interrupt_mode == EF4_INT_MODE_LEGACY) {
		efx->n_channels = 1 + (ef4_separate_tx_channels ? 1 : 0);
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		efx->legacy_irq = efx->pci_dev->irq;
	}

	/* Assign extra channels if possible */
	j = efx->n_channels;
	for (i = 0; i < EF4_MAX_EXTRA_CHANNELS; i++) {
		if (!efx->extra_channel_type[i])
			continue;
		if (efx->interrupt_mode != EF4_INT_MODE_MSIX ||
		    efx->n_channels <= extra_channels) {
			efx->extra_channel_type[i]->handle_no_channel(efx);
		} else {
			--j;
			ef4_get_channel(efx, j)->type =
				efx->extra_channel_type[i];
		}
	}

	efx->rss_spread = efx->n_rx_channels;

	return 0;
}
static int ef4_soft_enable_interrupts(struct ef4_nic *efx)
{
	struct ef4_channel *channel, *end_channel;
	int rc;

	BUG_ON(efx->state == STATE_DISABLED);

	efx->irq_soft_enabled = true;
	smp_wmb();

	ef4_for_each_channel(channel, efx) {
		if (!channel->type->keep_eventq) {
			rc = ef4_init_eventq(channel);
			if (rc)
				goto fail;
		}
		ef4_start_eventq(channel);
	}

	return 0;
fail:
	end_channel = channel;
	ef4_for_each_channel(channel, efx) {
		if (channel == end_channel)
			break;
		ef4_stop_eventq(channel);
		if (!channel->type->keep_eventq)
			ef4_fini_eventq(channel);
	}

	return rc;
}

static void ef4_soft_disable_interrupts(struct ef4_nic *efx)
{
	struct ef4_channel *channel;

	if (efx->state == STATE_DISABLED)
		return;

	efx->irq_soft_enabled = false;
	smp_wmb();

	if (efx->legacy_irq)
		synchronize_irq(efx->legacy_irq);

	ef4_for_each_channel(channel, efx) {
		if (channel->irq)
			synchronize_irq(channel->irq);

		ef4_stop_eventq(channel);
		if (!channel->type->keep_eventq)
			ef4_fini_eventq(channel);
	}
}

static int ef4_enable_interrupts(struct ef4_nic *efx)
{
	struct ef4_channel *channel, *end_channel;
	int rc;

	BUG_ON(efx->state == STATE_DISABLED);

	if (efx->eeh_disabled_legacy_irq) {
		enable_irq(efx->legacy_irq);
		efx->eeh_disabled_legacy_irq = false;
	}

	efx->type->irq_enable_master(efx);

	ef4_for_each_channel(channel, efx) {
		if (channel->type->keep_eventq) {
			rc = ef4_init_eventq(channel);
			if (rc)
				goto fail;
		}
	}

	rc = ef4_soft_enable_interrupts(efx);
	if (rc)
		goto fail;

	return 0;

fail:
	end_channel = channel;
	ef4_for_each_channel(channel, efx) {
		if (channel == end_channel)
			break;
		if (channel->type->keep_eventq)
			ef4_fini_eventq(channel);
	}

	efx->type->irq_disable_non_ev(efx);

	return rc;
}

static void ef4_disable_interrupts(struct ef4_nic *efx)
{
	struct ef4_channel *channel;

	ef4_soft_disable_interrupts(efx);

	ef4_for_each_channel(channel, efx) {
		if (channel->type->keep_eventq)
			ef4_fini_eventq(channel);
	}

	efx->type->irq_disable_non_ev(efx);
}

static void ef4_remove_interrupts(struct ef4_nic *efx)
{
	struct ef4_channel *channel;

	/* Remove MSI/MSI-X interrupts */
	ef4_for_each_channel(channel, efx)
		channel->irq = 0;
	pci_disable_msi(efx->pci_dev);
	pci_disable_msix(efx->pci_dev);

	/* Remove legacy interrupt */
	efx->legacy_irq = 0;
}
static void ef4_set_channels(struct ef4_nic *efx)
{
	struct ef4_channel *channel;
	struct ef4_tx_queue *tx_queue;

	efx->tx_channel_offset =
		ef4_separate_tx_channels ?
		efx->n_channels - efx->n_tx_channels : 0;

	/* We need to mark which channels really have RX and TX
	 * queues, and adjust the TX queue numbers if we have separate
	 * RX-only and TX-only channels.
	 */
	ef4_for_each_channel(channel, efx) {
		if (channel->channel < efx->n_rx_channels)
			channel->rx_queue.core_index = channel->channel;
		else
			channel->rx_queue.core_index = -1;

		ef4_for_each_channel_tx_queue(tx_queue, channel)
			tx_queue->queue -= (efx->tx_channel_offset *
					    EF4_TXQ_TYPES);
	}
}
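/* Example (illustrative only): with ef4_separate_tx_channels set,
 * n_channels == 4 and n_tx_channels == 2, tx_channel_offset is 2:
 * channels 0-1 are RX-only (core_index 0 and 1), channels 2-3 are
 * TX-only (core_index -1), and the TX queue numbers on channels 2-3
 * are rebased by 2 * EF4_TXQ_TYPES so they start again from queue 0. */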
static int ef4_probe_nic(struct ef4_nic *efx)
{
	int rc;

	netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");

	/* Carry out hardware-type specific initialisation */
	rc = efx->type->probe(efx);
	if (rc)
		return rc;

	do {
		if (!efx->max_channels || !efx->max_tx_channels) {
			netif_err(efx, drv, efx->net_dev,
				  "Insufficient resources to allocate"
				  " any channels\n");
			rc = -ENOSPC;
			goto fail1;
		}

		/* Determine the number of channels and queues by trying
		 * to hook in MSI-X interrupts.
		 */
		rc = ef4_probe_interrupts(efx);
		if (rc)
			goto fail1;

		ef4_set_channels(efx);

		/* dimension_resources can fail with EAGAIN */
		rc = efx->type->dimension_resources(efx);
		if (rc != 0 && rc != -EAGAIN)
			goto fail2;

		if (rc == -EAGAIN)
			/* try again with new max_channels */
			ef4_remove_interrupts(efx);

	} while (rc == -EAGAIN);

	if (efx->n_channels > 1)
		netdev_rss_key_fill(&efx->rx_hash_key,
				    sizeof(efx->rx_hash_key));
	ef4_set_default_rx_indir_table(efx);

	netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
	netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);

	/* Initialise the interrupt moderation settings */
	efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000);
	ef4_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true,
				true);

	return 0;

fail2:
	ef4_remove_interrupts(efx);
fail1:
	efx->type->remove(efx);
	return rc;
}

static void ef4_remove_nic(struct ef4_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n");

	ef4_remove_interrupts(efx);
	efx->type->remove(efx);
}

static int ef4_probe_filters(struct ef4_nic *efx)
{
	int rc;

	spin_lock_init(&efx->filter_lock);
	init_rwsem(&efx->filter_sem);
	mutex_lock(&efx->mac_lock);
	down_write(&efx->filter_sem);
	rc = efx->type->filter_table_probe(efx);
	if (rc)
		goto out_unlock;

#ifdef CONFIG_RFS_ACCEL
	if (efx->type->offload_features & NETIF_F_NTUPLE) {
		struct ef4_channel *channel;
		int i, success = 1;

		ef4_for_each_channel(channel, efx) {
			channel->rps_flow_id =
				kcalloc(efx->type->max_rx_ip_filters,
					sizeof(*channel->rps_flow_id),
					GFP_KERNEL);
			if (!channel->rps_flow_id)
				success = 0;
			else
				for (i = 0;
				     i < efx->type->max_rx_ip_filters;
				     ++i)
					channel->rps_flow_id[i] =
						RPS_FLOW_ID_INVALID;
		}

		if (!success) {
			ef4_for_each_channel(channel, efx)
				kfree(channel->rps_flow_id);
			efx->type->filter_table_remove(efx);
			rc = -ENOMEM;
			goto out_unlock;
		}

		efx->rps_expire_index = efx->rps_expire_channel = 0;
	}
#endif
out_unlock:
	up_write(&efx->filter_sem);
	mutex_unlock(&efx->mac_lock);
	return rc;
}
static void ef4_remove_filters(struct ef4_nic *efx)
{
#ifdef CONFIG_RFS_ACCEL
	struct ef4_channel *channel;

	ef4_for_each_channel(channel, efx)
		kfree(channel->rps_flow_id);
#endif
	down_write(&efx->filter_sem);
	efx->type->filter_table_remove(efx);
	up_write(&efx->filter_sem);
}

static void ef4_restore_filters(struct ef4_nic *efx)
{
	down_read(&efx->filter_sem);
	efx->type->filter_table_restore(efx);
	up_read(&efx->filter_sem);
}

/**************************************************************************
 *
 * NIC startup/shutdown
 *
 *************************************************************************/
static int ef4_probe_all(struct ef4_nic *efx)
{
	int rc;

	rc = ef4_probe_nic(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev, "failed to create NIC\n");
		goto fail1;
	}

	rc = ef4_probe_port(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev, "failed to create port\n");
		goto fail2;
	}

	BUILD_BUG_ON(EF4_DEFAULT_DMAQ_SIZE < EF4_RXQ_MIN_ENT);
	if (WARN_ON(EF4_DEFAULT_DMAQ_SIZE < EF4_TXQ_MIN_ENT(efx))) {
		rc = -EINVAL;
		goto fail3;
	}
	efx->rxq_entries = efx->txq_entries = EF4_DEFAULT_DMAQ_SIZE;

	rc = ef4_probe_filters(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to create filter tables\n");
		goto fail4;
	}

	rc = ef4_probe_channels(efx);
	if (rc)
		goto fail5;

	return 0;

 fail5:
	ef4_remove_filters(efx);
 fail4:
 fail3:
	ef4_remove_port(efx);
 fail2:
	ef4_remove_nic(efx);
 fail1:
	return rc;
}

/* If the interface is supposed to be running but is not, start
 * the hardware and software data path, regular activity for the port
 * (MAC statistics, link polling, etc.) and schedule the port to be
 * reconfigured. Interrupts must already be enabled. This function
 * is safe to call multiple times, so long as the NIC is not disabled.
 * Requires the RTNL lock.
 */
static void ef4_start_all(struct ef4_nic *efx)
{
	EF4_ASSERT_RESET_SERIALISED(efx);
	BUG_ON(efx->state == STATE_DISABLED);

	/* Check that it is appropriate to restart the interface. All
	 * of these flags are safe to read under just the rtnl lock */
	if (efx->port_enabled || !netif_running(efx->net_dev) ||
	    efx->reset_pending)
		return;

	ef4_start_port(efx);
	ef4_start_datapath(efx);

	/* Start the hardware monitor if there is one */
	if (efx->type->monitor != NULL)
		queue_delayed_work(efx->workqueue, &efx->monitor_work,
				   ef4_monitor_interval);

	efx->type->start_stats(efx);
	efx->type->pull_stats(efx);
	spin_lock_bh(&efx->stats_lock);
	efx->type->update_stats(efx, NULL, NULL);
	spin_unlock_bh(&efx->stats_lock);
}

/* Quiesce the hardware and software data path, and regular activity
 * for the port without bringing the link down. Safe to call multiple
 * times with the NIC in almost any state, but interrupts should be
 * enabled. Requires the RTNL lock.
 */
static void ef4_stop_all(struct ef4_nic *efx)
{
	EF4_ASSERT_RESET_SERIALISED(efx);

	/* port_enabled can be read safely under the rtnl lock */
	if (!efx->port_enabled)
		return;

	/* update stats before we go down so we can accurately count
	 * rx_nodesc_drops
	 */
	efx->type->pull_stats(efx);
	spin_lock_bh(&efx->stats_lock);
	efx->type->update_stats(efx, NULL, NULL);
	spin_unlock_bh(&efx->stats_lock);
	efx->type->stop_stats(efx);
	ef4_stop_port(efx);

	/* Stop the kernel transmit interface. This is only valid if
	 * the device is stopped or detached; otherwise the watchdog
	 * may fire immediately.
	 */
	WARN_ON(netif_running(efx->net_dev) &&
		netif_device_present(efx->net_dev));
	netif_tx_disable(efx->net_dev);

	ef4_stop_datapath(efx);
}

static void ef4_remove_all(struct ef4_nic *efx)
{
	ef4_remove_channels(efx);
	ef4_remove_filters(efx);
	ef4_remove_port(efx);
	ef4_remove_nic(efx);
}
/**************************************************************************
 *
 * Interrupt moderation
 *
 *************************************************************************/
unsigned int ef4_usecs_to_ticks(struct ef4_nic *efx, unsigned int usecs)
{
	if (usecs == 0)
		return 0;
	if (usecs * 1000 < efx->timer_quantum_ns)
		return 1; /* never round down to 0 */
	return usecs * 1000 / efx->timer_quantum_ns;
}

unsigned int ef4_ticks_to_usecs(struct ef4_nic *efx, unsigned int ticks)
{
	/* We must round up when converting ticks to microseconds
	 * because we round down when converting the other way.
	 */
	return DIV_ROUND_UP(ticks * efx->timer_quantum_ns, 1000);
}
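/* Worked example (illustrative, assuming timer_quantum_ns == 5000):
 * ef4_usecs_to_ticks() maps 60 us to 60000 / 5000 = 12 ticks, and any
 * non-zero value below 5 us to 1 tick rather than 0; going back,
 * ef4_ticks_to_usecs() maps 12 ticks to DIV_ROUND_UP(60000, 1000) =
 * 60 us, so a non-zero setting never collapses to zero on a round trip. */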
/* Set interrupt moderation parameters */
int ef4_init_irq_moderation(struct ef4_nic *efx, unsigned int tx_usecs,
			    unsigned int rx_usecs, bool rx_adaptive,
			    bool rx_may_override_tx)
{
	struct ef4_channel *channel;
	unsigned int timer_max_us;

	EF4_ASSERT_RESET_SERIALISED(efx);

	timer_max_us = efx->timer_max_ns / 1000;

	if (tx_usecs > timer_max_us || rx_usecs > timer_max_us)
		return -EINVAL;

	if (tx_usecs != rx_usecs && efx->tx_channel_offset == 0 &&
	    !rx_may_override_tx) {
		netif_err(efx, drv, efx->net_dev, "Channels are shared. "
			  "RX and TX IRQ moderation must be equal\n");
		return -EINVAL;
	}

	efx->irq_rx_adaptive = rx_adaptive;
	efx->irq_rx_moderation_us = rx_usecs;
	ef4_for_each_channel(channel, efx) {
		if (ef4_channel_has_rx_queue(channel))
			channel->irq_moderation_us = rx_usecs;
		else if (ef4_channel_has_tx_queues(channel))
			channel->irq_moderation_us = tx_usecs;
	}

	return 0;
}

void ef4_get_irq_moderation(struct ef4_nic *efx, unsigned int *tx_usecs,
			    unsigned int *rx_usecs, bool *rx_adaptive)
{
	*rx_adaptive = efx->irq_rx_adaptive;
	*rx_usecs = efx->irq_rx_moderation_us;

	/* If channels are shared between RX and TX, so is IRQ
	 * moderation. Otherwise, IRQ moderation is the same for all
	 * TX channels and is not adaptive.
	 */
	if (efx->tx_channel_offset == 0) {
		*tx_usecs = *rx_usecs;
	} else {
		struct ef4_channel *tx_channel;

		tx_channel = efx->channel[efx->tx_channel_offset];
		*tx_usecs = tx_channel->irq_moderation_us;
	}
}
/**************************************************************************
 *
 * Hardware monitor
 *
 *************************************************************************/

/* Run periodically off the general workqueue */
static void ef4_monitor(struct work_struct *data)
{
	struct ef4_nic *efx = container_of(data, struct ef4_nic,
					   monitor_work.work);

	netif_vdbg(efx, timer, efx->net_dev,
		   "hardware monitor executing on CPU %d\n",
		   raw_smp_processor_id());
	BUG_ON(efx->type->monitor == NULL);

	/* If the mac_lock is already held then it is likely a port
	 * reconfiguration is already in place, which will likely do
	 * most of the work of monitor() anyway. */
	if (mutex_trylock(&efx->mac_lock)) {
		if (efx->port_enabled)
			efx->type->monitor(efx);
		mutex_unlock(&efx->mac_lock);
	}

	queue_delayed_work(efx->workqueue, &efx->monitor_work,
			   ef4_monitor_interval);
}
/**************************************************************************
 *
 * ioctls
 *
 *************************************************************************/

/* Net device ioctl
 * Context: process, rtnl_lock() held.
 */
static int ef4_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	struct mii_ioctl_data *data = if_mii(ifr);

	/* Convert phy_id from older PRTAD/DEVAD format */
	if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
	    (data->phy_id & 0xfc00) == 0x0400)
		data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;

	return mdio_mii_ioctl(&efx->mdio, data, cmd);
}
/**************************************************************************
 *
 * NAPI interface
 *
 *************************************************************************/

static void ef4_init_napi_channel(struct ef4_channel *channel)
{
	struct ef4_nic *efx = channel->efx;

	channel->napi_dev = efx->net_dev;
	netif_napi_add(channel->napi_dev, &channel->napi_str,
		       ef4_poll, napi_weight);
}

static void ef4_init_napi(struct ef4_nic *efx)
{
	struct ef4_channel *channel;

	ef4_for_each_channel(channel, efx)
		ef4_init_napi_channel(channel);
}

static void ef4_fini_napi_channel(struct ef4_channel *channel)
{
	if (channel->napi_dev)
		netif_napi_del(&channel->napi_str);

	channel->napi_dev = NULL;
}

static void ef4_fini_napi(struct ef4_nic *efx)
{
	struct ef4_channel *channel;

	ef4_for_each_channel(channel, efx)
		ef4_fini_napi_channel(channel);
}
/**************************************************************************
 *
 * Kernel netpoll interface
 *
 *************************************************************************/

#ifdef CONFIG_NET_POLL_CONTROLLER

/* Although in the common case interrupts will be disabled, this is not
 * guaranteed. However, all our work happens inside the NAPI callback,
 * so no locking is required.
 */
static void ef4_netpoll(struct net_device *net_dev)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	struct ef4_channel *channel;

	ef4_for_each_channel(channel, efx)
		ef4_schedule_channel(channel);
}

#endif
/**************************************************************************
 *
 * Kernel net device interface
 *
 *************************************************************************/

/* Context: process, rtnl_lock() held. */
int ef4_net_open(struct net_device *net_dev)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	int rc;

	netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
		  raw_smp_processor_id());

	rc = ef4_check_disabled(efx);
	if (rc)
		return rc;
	if (efx->phy_mode & PHY_MODE_SPECIAL)
		return -EBUSY;

	/* Notify the kernel of the link state polled during driver load,
	 * before the monitor starts running */
	ef4_link_status_changed(efx);

	ef4_start_all(efx);
	ef4_selftest_async_start(efx);
	return 0;
}

/* Context: process, rtnl_lock() held.
 * Note that the kernel will ignore our return code; this method
 * should really be a void.
 */
int ef4_net_stop(struct net_device *net_dev)
{
	struct ef4_nic *efx = netdev_priv(net_dev);

	netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
		  raw_smp_processor_id());

	/* Stop the device and flush all the channels */
	ef4_stop_all(efx);

	return 0;
}
2121 /* Context: process, dev_base_lock or RTNL held, non-blocking. */
2122 static void ef4_net_stats(struct net_device
*net_dev
,
2123 struct rtnl_link_stats64
*stats
)
2125 struct ef4_nic
*efx
= netdev_priv(net_dev
);
2127 spin_lock_bh(&efx
->stats_lock
);
2128 efx
->type
->update_stats(efx
, NULL
, stats
);
2129 spin_unlock_bh(&efx
->stats_lock
);
2132 /* Context: netif_tx_lock held, BHs disabled. */
2133 static void ef4_watchdog(struct net_device
*net_dev
)
2135 struct ef4_nic
*efx
= netdev_priv(net_dev
);
2137 netif_err(efx
, tx_err
, efx
->net_dev
,
2138 "TX stuck with port_enabled=%d: resetting channels\n",
2141 ef4_schedule_reset(efx
, RESET_TYPE_TX_WATCHDOG
);
/* Context: process, rtnl_lock() held. */
static int ef4_change_mtu(struct net_device *net_dev, int new_mtu)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	int rc;

	rc = ef4_check_disabled(efx);
	if (rc)
		return rc;

	netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);

	ef4_device_detach_sync(efx);
	ef4_stop_all(efx);

	mutex_lock(&efx->mac_lock);
	net_dev->mtu = new_mtu;
	ef4_mac_reconfigure(efx);
	mutex_unlock(&efx->mac_lock);

	ef4_start_all(efx);
	netif_device_attach(efx->net_dev);
	return 0;
}
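/* The conservative detach/stop -> reconfigure -> start/attach sequence
 * above appears to reflect that RX buffer sizing is derived from the MTU,
 * so the datapath is quiesced while the MAC is reconfigured under
 * mac_lock.
 */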
static int ef4_set_mac_address(struct net_device *net_dev, void *data)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	struct sockaddr *addr = data;
	u8 *new_addr = addr->sa_data;
	u8 old_addr[6];
	int rc;

	if (!is_valid_ether_addr(new_addr)) {
		netif_err(efx, drv, efx->net_dev,
			  "invalid ethernet MAC address requested: %pM\n",
			  new_addr);
		return -EADDRNOTAVAIL;
	}

	/* save old address */
	ether_addr_copy(old_addr, net_dev->dev_addr);
	ether_addr_copy(net_dev->dev_addr, new_addr);
	if (efx->type->set_mac_address) {
		rc = efx->type->set_mac_address(efx);
		if (rc) {
			ether_addr_copy(net_dev->dev_addr, old_addr);
			return rc;
		}
	}

	/* Reconfigure the MAC */
	mutex_lock(&efx->mac_lock);
	ef4_mac_reconfigure(efx);
	mutex_unlock(&efx->mac_lock);

	return 0;
}
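/* Illustrative trigger for the handler above, e.g. from userspace:
 *
 *   ip link set dev <ifname> address 02:00:00:00:00:01
 *
 * The core wraps the requested address in a struct sockaddr and passes it
 * as the opaque 'data' argument; sa_data holds the six address bytes
 * checked by is_valid_ether_addr().
 */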
/* Context: netif_addr_lock held, BHs disabled. */
static void ef4_set_rx_mode(struct net_device *net_dev)
{
	struct ef4_nic *efx = netdev_priv(net_dev);

	if (efx->port_enabled)
		queue_work(efx->workqueue, &efx->mac_work);
	/* Otherwise ef4_start_port() will do this */
}
static int ef4_set_features(struct net_device *net_dev, netdev_features_t data)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	int rc;

	/* If disabling RX n-tuple filtering, clear existing filters */
	if (net_dev->features & ~data & NETIF_F_NTUPLE) {
		rc = efx->type->filter_clear_rx(efx, EF4_FILTER_PRI_MANUAL);
		if (rc)
			return rc;
	}

	/* If Rx VLAN filter is changed, update filters via mac_reconfigure */
	if ((net_dev->features ^ data) & NETIF_F_HW_VLAN_CTAG_FILTER) {
		/* ef4_set_rx_mode() will schedule MAC work to update filters
		 * when the new features are finally set in net_dev.
		 */
		ef4_set_rx_mode(net_dev);
	}

	return 0;
}
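/* Worked example of the feature-mask tests above, with the current set in
 * net_dev->features and the requested set in 'data':
 *
 *   features & ~data & NETIF_F_NTUPLE
 *
 * is non-zero only when NTUPLE is currently on and being turned off, while
 *
 *   (features ^ data) & NETIF_F_HW_VLAN_CTAG_FILTER
 *
 * is non-zero whenever the VLAN-filter bit changes in either direction.
 */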
static const struct net_device_ops ef4_netdev_ops = {
	.ndo_open		= ef4_net_open,
	.ndo_stop		= ef4_net_stop,
	.ndo_get_stats64	= ef4_net_stats,
	.ndo_tx_timeout		= ef4_watchdog,
	.ndo_start_xmit		= ef4_hard_start_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= ef4_ioctl,
	.ndo_change_mtu		= ef4_change_mtu,
	.ndo_set_mac_address	= ef4_set_mac_address,
	.ndo_set_rx_mode	= ef4_set_rx_mode,
	.ndo_set_features	= ef4_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ef4_netpoll,
#endif
	.ndo_setup_tc		= ef4_setup_tc,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= ef4_filter_rfs,
#endif
};
static void ef4_update_name(struct ef4_nic *efx)
{
	strcpy(efx->name, efx->net_dev->name);
	ef4_mtd_rename(efx);
	ef4_set_channel_names(efx);
}
static int ef4_netdev_event(struct notifier_block *this,
			    unsigned long event, void *ptr)
{
	struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);

	if ((net_dev->netdev_ops == &ef4_netdev_ops) &&
	    event == NETDEV_CHANGENAME)
		ef4_update_name(netdev_priv(net_dev));

	return NOTIFY_DONE;
}
static struct notifier_block ef4_netdev_notifier = {
	.notifier_call = ef4_netdev_event,
};
static ssize_t
show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ef4_nic *efx = pci_get_drvdata(to_pci_dev(dev));
	return sprintf(buf, "%d\n", efx->phy_type);
}
static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);
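/* DEVICE_ATTR(phy_type, 0444, ...) publishes a read-only sysfs file,
 * typically /sys/bus/pci/devices/<bdf>/phy_type, whose contents come from
 * show_phy_type(); mode 0444 means world-readable with no store handler,
 * hence the NULL fourth argument.
 */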
static int ef4_register_netdev(struct ef4_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;
	struct ef4_channel *channel;
	int rc;

	net_dev->watchdog_timeo = 5 * HZ;
	net_dev->irq = efx->pci_dev->irq;
	net_dev->netdev_ops = &ef4_netdev_ops;
	net_dev->ethtool_ops = &ef4_ethtool_ops;
	net_dev->gso_max_segs = EF4_TSO_MAX_SEGS;
	net_dev->min_mtu = EF4_MIN_MTU;
	net_dev->max_mtu = EF4_MAX_MTU;

	rtnl_lock();

	/* Enable resets to be scheduled and check whether any were
	 * already requested.  If so, the NIC is probably hosed so we
	 * abort.
	 */
	efx->state = STATE_READY;
	smp_mb(); /* ensure we change state before checking reset_pending */
	if (efx->reset_pending) {
		netif_err(efx, probe, efx->net_dev,
			  "aborting probe due to scheduled reset\n");
		rc = -EIO;
		goto fail_locked;
	}

	rc = dev_alloc_name(net_dev, net_dev->name);
	if (rc < 0)
		goto fail_locked;
	ef4_update_name(efx);

	/* Always start with carrier off; PHY events will detect the link */
	netif_carrier_off(net_dev);

	rc = register_netdevice(net_dev);
	if (rc)
		goto fail_locked;

	ef4_for_each_channel(channel, efx) {
		struct ef4_tx_queue *tx_queue;
		ef4_for_each_channel_tx_queue(tx_queue, channel)
			ef4_init_tx_queue_core_txq(tx_queue);
	}

	ef4_associate(efx);

	rtnl_unlock();

	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
	if (rc) {
		netif_err(efx, drv, efx->net_dev,
			  "failed to init net dev attributes\n");
		goto fail_registered;
	}

	return 0;

fail_registered:
	rtnl_lock();
	ef4_dissociate(efx);
	unregister_netdevice(net_dev);
fail_locked:
	efx->state = STATE_UNINIT;
	rtnl_unlock();
	netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
	return rc;
}
static void ef4_unregister_netdev(struct ef4_nic *efx)
{
	if (!efx->net_dev)
		return;

	BUG_ON(netdev_priv(efx->net_dev) != efx);

	if (ef4_dev_registered(efx)) {
		strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
		device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
		unregister_netdev(efx->net_dev);
	}
}
/**************************************************************************
 *
 * Device reset and suspend
 *
 **************************************************************************/
/* Tears down the entire software state and most of the hardware state
 * before reset.
 */
void ef4_reset_down(struct ef4_nic *efx, enum reset_type method)
{
	EF4_ASSERT_RESET_SERIALISED(efx);

	ef4_stop_all(efx);
	ef4_disable_interrupts(efx);

	mutex_lock(&efx->mac_lock);
	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
	    method != RESET_TYPE_DATAPATH)
		efx->phy_op->fini(efx);
	efx->type->fini(efx);
}
/* This function will always ensure that the locks acquired in
 * ef4_reset_down() are released. A failure return code indicates
 * that we were unable to reinitialise the hardware, and the
 * driver should be disabled. If ok is false, then the rx and tx
 * engines are not restarted, pending a RESET_DISABLE. */
int ef4_reset_up(struct ef4_nic *efx, enum reset_type method, bool ok)
{
	int rc;

	EF4_ASSERT_RESET_SERIALISED(efx);

	/* Ensure that SRAM is initialised even if we're disabling the device */
	rc = efx->type->init(efx);
	if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
		goto fail;
	}

	if (!ok)
		goto fail;

	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
	    method != RESET_TYPE_DATAPATH) {
		rc = efx->phy_op->init(efx);
		if (rc)
			goto fail;
		rc = efx->phy_op->reconfigure(efx);
		if (rc && rc != -EPERM)
			netif_err(efx, drv, efx->net_dev,
				  "could not restore PHY settings\n");
	}

	rc = ef4_enable_interrupts(efx);
	if (rc)
		goto fail;

	down_read(&efx->filter_sem);
	ef4_restore_filters(efx);
	up_read(&efx->filter_sem);

	mutex_unlock(&efx->mac_lock);

	ef4_start_all(efx);

	return 0;

fail:
	efx->port_initialized = false;

	mutex_unlock(&efx->mac_lock);

	return rc;
}
/* Reset the NIC using the specified method.  Note that the reset may
 * fail, in which case the card will be left in an unusable state.
 *
 * Caller must hold the rtnl_lock.
 */
int ef4_reset(struct ef4_nic *efx, enum reset_type method)
{
	int rc, rc2;
	bool disabled;

	netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
		   RESET_TYPE(method));

	ef4_device_detach_sync(efx);
	ef4_reset_down(efx, method);

	rc = efx->type->reset(efx, method);
	if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");
		goto out;
	}

	/* Clear flags for the scopes we covered.  We assume the NIC and
	 * driver are now quiescent so that there is no race here.
	 */
	if (method < RESET_TYPE_MAX_METHOD)
		efx->reset_pending &= -(1 << (method + 1));
	else /* it doesn't fit into the well-ordered scope hierarchy */
		__clear_bit(method, &efx->reset_pending);

	/* Reinitialise bus-mastering, which may have been turned off before
	 * the reset was scheduled. This is still appropriate, even in the
	 * RESET_TYPE_DISABLE since this driver generally assumes the hardware
	 * can respond to requests. */
	pci_set_master(efx->pci_dev);

out:
	/* Leave device stopped if necessary */
	disabled = rc ||
		method == RESET_TYPE_DISABLE ||
		method == RESET_TYPE_RECOVER_OR_DISABLE;
	rc2 = ef4_reset_up(efx, method, !disabled);
	if (rc2) {
		disabled = true;
		if (!rc)
			rc = rc2;
	}

	if (disabled) {
		dev_close(efx->net_dev);
		netif_err(efx, drv, efx->net_dev, "has been disabled\n");
		efx->state = STATE_DISABLED;
	} else {
		netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
		netif_device_attach(efx->net_dev);
	}
	return rc;
}
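/* Worked example of the scope-clearing arithmetic above, assuming the
 * reset types are numbered in the order of ef4_reset_type_names
 * (INVISIBLE == 0, ALL == 1, ...): after an ALL reset,
 * -(1 << 2) == ...11111100 in two's complement, so ANDing it into
 * reset_pending clears the INVISIBLE and ALL bits while leaving any wider
 * pending scopes (WORLD, DISABLE, ...) set.
 */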
/* Try recovery mechanisms.
 * For now only EEH is supported.
 * Returns 0 if the recovery mechanisms are unsuccessful.
 * Returns a non-zero value otherwise.
 */
int ef4_try_recovery(struct ef4_nic *efx)
{
#ifdef CONFIG_EEH
	/* A PCI error can occur and not be seen by EEH because nothing
	 * happens on the PCI bus. In this case the driver may fail and
	 * schedule a 'recover or reset', leading to this recovery handler.
	 * Manually call the eeh failure check function.
	 */
	struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev);

	if (eeh_dev_check_failure(eehdev)) {
		/* The EEH mechanisms will handle the error and reset the
		 * device if necessary.
		 */
		return 1;
	}
#endif
	return 0;
}
/* The worker thread exists so that code that cannot sleep can
 * schedule a reset for later.
 */
static void ef4_reset_work(struct work_struct *data)
{
	struct ef4_nic *efx = container_of(data, struct ef4_nic, reset_work);
	unsigned long pending;
	enum reset_type method;

	pending = ACCESS_ONCE(efx->reset_pending);
	method = fls(pending) - 1;

	if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
	     method == RESET_TYPE_RECOVER_OR_ALL) &&
	    ef4_try_recovery(efx))
		return;

	if (!pending)
		return;

	rtnl_lock();

	/* We checked the state in ef4_schedule_reset() but it may
	 * have changed by now.  Now that we have the RTNL lock,
	 * it cannot change again.
	 */
	if (efx->state == STATE_READY)
		(void)ef4_reset(efx, method);

	rtnl_unlock();
}
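/* fls() returns the 1-based index of the most significant set bit
 * (fls(0) == 0), so 'method = fls(pending) - 1' selects the widest reset
 * scope currently pending; e.g. pending == 0x5 gives fls() == 3 and
 * method == 2. Narrower pending scopes are subsumed and later cleared by
 * ef4_reset() itself.
 */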
void ef4_schedule_reset(struct ef4_nic *efx, enum reset_type type)
{
	enum reset_type method;

	if (efx->state == STATE_RECOVERY) {
		netif_dbg(efx, drv, efx->net_dev,
			  "recovering: skip scheduling %s reset\n",
			  RESET_TYPE(type));
		return;
	}

	switch (type) {
	case RESET_TYPE_INVISIBLE:
	case RESET_TYPE_ALL:
	case RESET_TYPE_RECOVER_OR_ALL:
	case RESET_TYPE_WORLD:
	case RESET_TYPE_DISABLE:
	case RESET_TYPE_RECOVER_OR_DISABLE:
	case RESET_TYPE_DATAPATH:
		method = type;
		netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
			  RESET_TYPE(method));
		break;
	default:
		method = efx->type->map_reset_reason(type);
		netif_dbg(efx, drv, efx->net_dev,
			  "scheduling %s reset for %s\n",
			  RESET_TYPE(method), RESET_TYPE(type));
		break;
	}

	set_bit(method, &efx->reset_pending);
	smp_mb(); /* ensure we change reset_pending before checking state */

	/* If we're not READY then just leave the flags set as the cue
	 * to abort probing or reschedule the reset later.
	 */
	if (ACCESS_ONCE(efx->state) != STATE_READY)
		return;

	queue_work(reset_workqueue, &efx->reset_work);
}
/**************************************************************************
 *
 * List of NICs we support
 *
 **************************************************************************/
/* PCI device ID table */
static const struct pci_device_id ef4_pci_table[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
		    PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0),
	 .driver_data = (unsigned long) &falcon_a1_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
		    PCI_DEVICE_ID_SOLARFLARE_SFC4000B),
	 .driver_data = (unsigned long) &falcon_b0_nic_type},
	{0}			/* end of list */
};
/**************************************************************************
 *
 * Dummy PHY/MAC operations
 *
 * Can be used for some unimplemented operations
 * Needed so all function pointers are valid and do not have to be tested
 * before use
 *
 **************************************************************************/
int ef4_port_dummy_op_int(struct ef4_nic *efx)
{
	return 0;
}
void ef4_port_dummy_op_void(struct ef4_nic *efx) {}

static bool ef4_port_dummy_op_poll(struct ef4_nic *efx)
{
	return false;
}

static const struct ef4_phy_operations ef4_dummy_phy_operations = {
	.init		 = ef4_port_dummy_op_int,
	.reconfigure	 = ef4_port_dummy_op_int,
	.poll		 = ef4_port_dummy_op_poll,
	.fini		 = ef4_port_dummy_op_void,
};
/**************************************************************************
 *
 * Data housekeeping
 *
 **************************************************************************/
/* This zeroes out and then fills in the invariants in a struct
 * ef4_nic (including all sub-structures).
 */
static int ef4_init_struct(struct ef4_nic *efx,
			   struct pci_dev *pci_dev, struct net_device *net_dev)
{
	int i;

	/* Initialise common structures */
	INIT_LIST_HEAD(&efx->node);
	INIT_LIST_HEAD(&efx->secondary_list);
	spin_lock_init(&efx->biu_lock);
#ifdef CONFIG_SFC_FALCON_MTD
	INIT_LIST_HEAD(&efx->mtd_list);
#endif
	INIT_WORK(&efx->reset_work, ef4_reset_work);
	INIT_DELAYED_WORK(&efx->monitor_work, ef4_monitor);
	INIT_DELAYED_WORK(&efx->selftest_work, ef4_selftest_async_work);
	efx->pci_dev = pci_dev;
	efx->msg_enable = debug;
	efx->state = STATE_UNINIT;
	strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));

	efx->net_dev = net_dev;
	efx->rx_prefix_size = efx->type->rx_prefix_size;
	efx->rx_ip_align =
		NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
	efx->rx_packet_hash_offset =
		efx->type->rx_hash_offset - efx->type->rx_prefix_size;
	efx->rx_packet_ts_offset =
		efx->type->rx_ts_offset - efx->type->rx_prefix_size;
	spin_lock_init(&efx->stats_lock);
	mutex_init(&efx->mac_lock);
	efx->phy_op = &ef4_dummy_phy_operations;
	efx->mdio.dev = net_dev;
	INIT_WORK(&efx->mac_work, ef4_mac_work);
	init_waitqueue_head(&efx->flush_wq);

	for (i = 0; i < EF4_MAX_CHANNELS; i++) {
		efx->channel[i] = ef4_alloc_channel(efx, i, NULL);
		if (!efx->channel[i])
			goto fail;
		efx->msi_context[i].efx = efx;
		efx->msi_context[i].index = i;
	}

	/* Higher numbered interrupt modes are less capable! */
	efx->interrupt_mode = max(efx->type->max_interrupt_mode,
				  interrupt_mode);

	/* Would be good to use the net_dev name, but we're too early */
	snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
		 pci_name(pci_dev));
	efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
	if (!efx->workqueue)
		goto fail;

	return 0;

fail:
	ef4_fini_struct(efx);
	return -ENOMEM;
}
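/* Example of the rx_ip_align computation above: with NET_IP_ALIGN == 2 and
 * a 16-byte hardware RX prefix, the pad is (16 + 2) % 4 == 2 bytes; on
 * architectures that define NET_IP_ALIGN as 0 no pad is added at all.
 */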
static void ef4_fini_struct(struct ef4_nic *efx)
{
	int i;

	for (i = 0; i < EF4_MAX_CHANNELS; i++)
		kfree(efx->channel[i]);

	kfree(efx->vpd_sn);

	if (efx->workqueue) {
		destroy_workqueue(efx->workqueue);
		efx->workqueue = NULL;
	}
}
void ef4_update_sw_stats(struct ef4_nic *efx, u64 *stats)
{
	u64 n_rx_nodesc_trunc = 0;
	struct ef4_channel *channel;

	ef4_for_each_channel(channel, efx)
		n_rx_nodesc_trunc += channel->n_rx_nodesc_trunc;
	stats[GENERIC_STAT_rx_nodesc_trunc] = n_rx_nodesc_trunc;
	stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops);
}
/**************************************************************************
 *
 * PCI interface
 *
 **************************************************************************/
/* Main body of final NIC shutdown code
 * This is called only at module unload (or hotplug removal).
 */
static void ef4_pci_remove_main(struct ef4_nic *efx)
{
	/* Flush reset_work. It can no longer be scheduled since we
	 * are not READY.
	 */
	BUG_ON(efx->state == STATE_READY);
	cancel_work_sync(&efx->reset_work);

	ef4_disable_interrupts(efx);
	ef4_nic_fini_interrupt(efx);
	ef4_fini_port(efx);
	efx->type->fini(efx);
	ef4_fini_napi(efx);
	ef4_remove_all(efx);
}
/* Final NIC shutdown
 * This is called only at module unload (or hotplug removal).  A PF can call
 * this on its VFs to ensure they are unbound first.
 */
static void ef4_pci_remove(struct pci_dev *pci_dev)
{
	struct ef4_nic *efx;

	efx = pci_get_drvdata(pci_dev);
	if (!efx)
		return;

	/* Mark the NIC as fini, then stop the interface */
	rtnl_lock();
	ef4_dissociate(efx);
	dev_close(efx->net_dev);
	ef4_disable_interrupts(efx);
	efx->state = STATE_UNINIT;
	rtnl_unlock();

	ef4_unregister_netdev(efx);

	ef4_mtd_remove(efx);

	ef4_pci_remove_main(efx);

	ef4_fini_io(efx);
	netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");

	ef4_fini_struct(efx);
	free_netdev(efx->net_dev);

	pci_disable_pcie_error_reporting(pci_dev);
}
/* NIC VPD information
 * Called during probe to display the part number of the
 * installed NIC.  VPD is potentially very large but this should
 * always appear within the first 512 bytes.
 */
#define SFC_VPD_LEN 512
static void ef4_probe_vpd_strings(struct ef4_nic *efx)
{
	struct pci_dev *dev = efx->pci_dev;
	char vpd_data[SFC_VPD_LEN];
	ssize_t vpd_size;
	int ro_start, ro_size, i, j;

	/* Get the vpd data from the device */
	vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
	if (vpd_size <= 0) {
		netif_err(efx, drv, efx->net_dev, "Unable to read VPD\n");
		return;
	}

	/* Get the Read only section */
	ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
	if (ro_start < 0) {
		netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n");
		return;
	}

	ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
	j = ro_size;
	i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
	if (i + j > vpd_size)
		j = vpd_size - i;

	/* Get the Part number */
	i = pci_vpd_find_info_keyword(vpd_data, i, j, "PN");
	if (i < 0) {
		netif_err(efx, drv, efx->net_dev, "Part number not found\n");
		return;
	}

	j = pci_vpd_info_field_size(&vpd_data[i]);
	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (i + j > vpd_size) {
		netif_err(efx, drv, efx->net_dev, "Incomplete part number\n");
		return;
	}

	netif_info(efx, drv, efx->net_dev,
		   "Part Number : %.*s\n", j, &vpd_data[i]);

	i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
	j = ro_size;
	i = pci_vpd_find_info_keyword(vpd_data, i, j, "SN");
	if (i < 0) {
		netif_err(efx, drv, efx->net_dev, "Serial number not found\n");
		return;
	}

	j = pci_vpd_info_field_size(&vpd_data[i]);
	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (i + j > vpd_size) {
		netif_err(efx, drv, efx->net_dev, "Incomplete serial number\n");
		return;
	}

	efx->vpd_sn = kmalloc(j + 1, GFP_KERNEL);
	if (!efx->vpd_sn)
		return;

	snprintf(efx->vpd_sn, j + 1, "%s", &vpd_data[i]);
}
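/* Sketch of the VPD layout walked above, per the PCI spec: a large
 * resource tag (PCI_VPD_LRDT_RO_DATA) introduces the read-only section,
 * inside which each field carries a 3-byte header (two keyword bytes such
 * as "PN" or "SN" plus a length byte) followed by the value. That header
 * is PCI_VPD_INFO_FLD_HDR_SIZE, which is why i is advanced past it before
 * the j-byte value is consumed.
 */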
/* Main body of NIC initialisation
 * This is called at module load (or hotplug insertion, theoretically).
 */
static int ef4_pci_probe_main(struct ef4_nic *efx)
{
	int rc;

	/* Do start-of-day initialisation */
	rc = ef4_probe_all(efx);
	if (rc)
		goto fail1;

	ef4_init_napi(efx);

	rc = efx->type->init(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to initialise NIC\n");
		goto fail3;
	}

	rc = ef4_init_port(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to initialise port\n");
		goto fail4;
	}

	rc = ef4_nic_init_interrupt(efx);
	if (rc)
		goto fail5;
	rc = ef4_enable_interrupts(efx);
	if (rc)
		goto fail6;

	return 0;

 fail6:
	ef4_nic_fini_interrupt(efx);
 fail5:
	ef4_fini_port(efx);
 fail4:
	efx->type->fini(efx);
 fail3:
	ef4_fini_napi(efx);
	ef4_remove_all(efx);
 fail1:
	return rc;
}
/* NIC initialisation
 *
 * This is called at module load (or hotplug insertion,
 * theoretically).  It sets up PCI mappings, resets the NIC,
 * sets up and registers the network devices with the kernel and hooks
 * the interrupt service routine.  It does not prepare the device for
 * transmission; this is left to the first time one of the network
 * interfaces is brought up (i.e. ef4_net_open).
 */
static int ef4_pci_probe(struct pci_dev *pci_dev,
			 const struct pci_device_id *entry)
{
	struct net_device *net_dev;
	struct ef4_nic *efx;
	int rc;

	/* Allocate and initialise a struct net_device and struct ef4_nic */
	net_dev = alloc_etherdev_mqs(sizeof(*efx), EF4_MAX_CORE_TX_QUEUES,
				     EF4_MAX_RX_QUEUES);
	if (!net_dev)
		return -ENOMEM;
	efx = netdev_priv(net_dev);
	efx->type = (const struct ef4_nic_type *) entry->driver_data;
	efx->fixed_features |= NETIF_F_HIGHDMA;

	pci_set_drvdata(pci_dev, efx);
	SET_NETDEV_DEV(net_dev, &pci_dev->dev);
	rc = ef4_init_struct(efx, pci_dev, net_dev);
	if (rc)
		goto fail1;

	netif_info(efx, probe, efx->net_dev,
		   "Solarflare NIC detected\n");

	ef4_probe_vpd_strings(efx);

	/* Set up basic I/O (BAR mappings etc) */
	rc = ef4_init_io(efx);
	if (rc)
		goto fail2;

	rc = ef4_pci_probe_main(efx);
	if (rc)
		goto fail3;

	net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
			      NETIF_F_RXCSUM);
	/* Mask for features that also apply to VLAN devices */
	net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG |
				   NETIF_F_HIGHDMA | NETIF_F_RXCSUM);

	net_dev->hw_features = net_dev->features & ~efx->fixed_features;

	/* Disable VLAN filtering by default.  It may be enforced if
	 * the feature is fixed (i.e. VLAN filters are required to
	 * receive VLAN tagged packets due to vPort restrictions).
	 */
	net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
	net_dev->features |= efx->fixed_features;

	rc = ef4_register_netdev(efx);
	if (rc)
		goto fail4;

	netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");

	/* Try to create MTDs, but allow this to fail */
	rtnl_lock();
	rc = ef4_mtd_probe(efx);
	rtnl_unlock();
	if (rc && rc != -EPERM)
		netif_warn(efx, probe, efx->net_dev,
			   "failed to create MTDs (%d)\n", rc);

	rc = pci_enable_pcie_error_reporting(pci_dev);
	if (rc && rc != -EINVAL)
		netif_notice(efx, probe, efx->net_dev,
			     "PCIE error reporting unavailable (%d).\n",
			     rc);

	return 0;

 fail4:
	ef4_pci_remove_main(efx);
 fail3:
	ef4_fini_io(efx);
 fail2:
	ef4_fini_struct(efx);
 fail1:
	WARN_ON(rc > 0);
	netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
	free_netdev(net_dev);
	return rc;
}
static int ef4_pm_freeze(struct device *dev)
{
	struct ef4_nic *efx = pci_get_drvdata(to_pci_dev(dev));

	rtnl_lock();

	if (efx->state != STATE_DISABLED) {
		efx->state = STATE_UNINIT;

		ef4_device_detach_sync(efx);

		ef4_stop_all(efx);
		ef4_disable_interrupts(efx);
	}

	rtnl_unlock();

	return 0;
}
static int ef4_pm_thaw(struct device *dev)
{
	int rc;
	struct ef4_nic *efx = pci_get_drvdata(to_pci_dev(dev));

	rtnl_lock();

	if (efx->state != STATE_DISABLED) {
		rc = ef4_enable_interrupts(efx);
		if (rc)
			goto fail;

		mutex_lock(&efx->mac_lock);
		efx->phy_op->reconfigure(efx);
		mutex_unlock(&efx->mac_lock);

		ef4_start_all(efx);

		netif_device_attach(efx->net_dev);

		efx->state = STATE_READY;

		efx->type->resume_wol(efx);
	}

	rtnl_unlock();

	/* Reschedule any quenched resets scheduled during ef4_pm_freeze() */
	queue_work(reset_workqueue, &efx->reset_work);

	return 0;

fail:
	rtnl_unlock();

	return rc;
}
static int ef4_pm_poweroff(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct ef4_nic *efx = pci_get_drvdata(pci_dev);

	efx->type->fini(efx);

	efx->reset_pending = 0;

	pci_save_state(pci_dev);
	return pci_set_power_state(pci_dev, PCI_D3hot);
}
/* Used for both resume and restore */
static int ef4_pm_resume(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct ef4_nic *efx = pci_get_drvdata(pci_dev);
	int rc;

	rc = pci_set_power_state(pci_dev, PCI_D0);
	if (rc)
		return rc;
	pci_restore_state(pci_dev);
	rc = pci_enable_device(pci_dev);
	if (rc)
		return rc;
	pci_set_master(efx->pci_dev);
	rc = efx->type->reset(efx, RESET_TYPE_ALL);
	if (rc)
		return rc;
	rc = efx->type->init(efx);
	if (rc)
		return rc;
	rc = ef4_pm_thaw(dev);
	return rc;
}
static int ef4_pm_suspend(struct device *dev)
{
	int rc;

	ef4_pm_freeze(dev);
	rc = ef4_pm_poweroff(dev);
	if (rc)
		ef4_pm_resume(dev);

	return rc;
}
static const struct dev_pm_ops ef4_pm_ops = {
	.suspend	= ef4_pm_suspend,
	.resume		= ef4_pm_resume,
	.freeze		= ef4_pm_freeze,
	.thaw		= ef4_pm_thaw,
	.poweroff	= ef4_pm_poweroff,
	.restore	= ef4_pm_resume,
};
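/* How these callbacks map onto PM transitions: suspend/resume serve
 * suspend-to-RAM, freeze/thaw bracket hibernation image creation, and
 * poweroff/restore cover the final power-down and the boot back from the
 * image. Reusing ef4_pm_resume for .restore works because it performs a
 * full RESET_TYPE_ALL and re-runs type->init() from scratch.
 */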
/* A PCI error affecting this device was detected.
 * At this point MMIO and DMA may be disabled.
 * Stop the software path and request a slot reset.
 */
static pci_ers_result_t ef4_io_error_detected(struct pci_dev *pdev,
					      enum pci_channel_state state)
{
	pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
	struct ef4_nic *efx = pci_get_drvdata(pdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	rtnl_lock();

	if (efx->state != STATE_DISABLED) {
		efx->state = STATE_RECOVERY;
		efx->reset_pending = 0;

		ef4_device_detach_sync(efx);

		ef4_stop_all(efx);
		ef4_disable_interrupts(efx);

		status = PCI_ERS_RESULT_NEED_RESET;
	} else {
		/* If the interface is disabled we don't want to do anything
		 * with it.
		 */
		status = PCI_ERS_RESULT_RECOVERED;
	}

	rtnl_unlock();

	pci_disable_device(pdev);

	return status;
}
/* Fake a successful reset, which will be performed later in ef4_io_resume. */
static pci_ers_result_t ef4_io_slot_reset(struct pci_dev *pdev)
{
	struct ef4_nic *efx = pci_get_drvdata(pdev);
	pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
	int rc;

	if (pci_enable_device(pdev)) {
		netif_err(efx, hw, efx->net_dev,
			  "Cannot re-enable PCI device after reset.\n");
		status = PCI_ERS_RESULT_DISCONNECT;
	}

	rc = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (rc) {
		netif_err(efx, hw, efx->net_dev,
			  "pci_cleanup_aer_uncorrect_error_status failed (%d)\n", rc);
		/* Non-fatal error. Continue. */
	}

	return status;
}
/* Perform the actual reset and resume I/O operations. */
static void ef4_io_resume(struct pci_dev *pdev)
{
	struct ef4_nic *efx = pci_get_drvdata(pdev);
	int rc;

	rtnl_lock();

	if (efx->state == STATE_DISABLED)
		goto out;

	rc = ef4_reset(efx, RESET_TYPE_ALL);
	if (rc) {
		netif_err(efx, hw, efx->net_dev,
			  "ef4_reset failed after PCI error (%d)\n", rc);
	} else {
		efx->state = STATE_READY;
		netif_dbg(efx, hw, efx->net_dev,
			  "Done resetting and resuming IO after PCI error.\n");
	}

out:
	rtnl_unlock();
}
/* For simplicity and reliability, we always require a slot reset and try to
 * reset the hardware when a pci error affecting the device is detected.
 * We leave both the link_reset and mmio_enabled callback unimplemented:
 * with our request for slot reset the mmio_enabled callback will never be
 * called, and the link_reset callback is not used by AER or EEH mechanisms.
 */
static const struct pci_error_handlers ef4_err_handlers = {
	.error_detected = ef4_io_error_detected,
	.slot_reset	= ef4_io_slot_reset,
	.resume		= ef4_io_resume,
};
static struct pci_driver ef4_pci_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= ef4_pci_table,
	.probe		= ef4_pci_probe,
	.remove		= ef4_pci_remove,
	.driver.pm	= &ef4_pm_ops,
	.err_handler	= &ef4_err_handlers,
};
/**************************************************************************
 *
 * Kernel module interface
 *
 *************************************************************************/
module_param(interrupt_mode, uint, 0444);
MODULE_PARM_DESC(interrupt_mode,
		 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
static int __init ef4_init_module(void)
{
	int rc;

	printk(KERN_INFO "Solarflare Falcon driver v" EF4_DRIVER_VERSION "\n");

	rc = register_netdevice_notifier(&ef4_netdev_notifier);
	if (rc)
		goto err_notifier;

	reset_workqueue = create_singlethread_workqueue("sfc_reset");
	if (!reset_workqueue) {
		rc = -ENOMEM;
		goto err_reset;
	}

	rc = pci_register_driver(&ef4_pci_driver);
	if (rc < 0)
		goto err_pci;

	return 0;

 err_pci:
	destroy_workqueue(reset_workqueue);
 err_reset:
	unregister_netdevice_notifier(&ef4_netdev_notifier);
 err_notifier:
	return rc;
}
static void __exit ef4_exit_module(void)
{
	printk(KERN_INFO "Solarflare Falcon driver unloading\n");

	pci_unregister_driver(&ef4_pci_driver);
	destroy_workqueue(reset_workqueue);
	unregister_netdevice_notifier(&ef4_netdev_notifier);
}
module_init(ef4_init_module);
module_exit(ef4_exit_module);

MODULE_AUTHOR("Solarflare Communications and "
	      "Michael Brown <mbrown@fensystems.co.uk>");
MODULE_DESCRIPTION("Solarflare Falcon network driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, ef4_pci_table);
MODULE_VERSION(EF4_DRIVER_VERSION);