/*
 * Copyright 2015 Amazon.com, Inc. or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif /* CONFIG_RFS_ACCEL */
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/numa.h>
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/version.h>
#include <linux/vmalloc.h>
#include <net/ip.h>

#include "ena_netdev.h"
#include "ena_pci_id_tbl.h"
static char version[] = DEVICE_NAME " v" DRV_MODULE_VERSION "\n";

MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
MODULE_DESCRIPTION(DEVICE_NAME);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5 * HZ)

#define ENA_NAPI_BUDGET 64

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \
                NETIF_MSG_TX_DONE | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
static struct ena_aenq_handlers aenq_handlers;

static struct workqueue_struct *ena_wq;

MODULE_DEVICE_TABLE(pci, ena_pci_tbl);

static int ena_rss_init_default(struct ena_adapter *adapter);
static void check_for_admin_com_state(struct ena_adapter *adapter);
static void ena_destroy_device(struct ena_adapter *adapter);
static int ena_restore_device(struct ena_adapter *adapter);
static void ena_tx_timeout(struct net_device *dev)
{
        struct ena_adapter *adapter = netdev_priv(dev);

        /* Change the state of the device to trigger reset
         * Check that we are not in the middle or a trigger already
         */
        if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
                return;

        adapter->reset_reason = ENA_REGS_RESET_OS_NETDEV_WD;
        u64_stats_update_begin(&adapter->syncp);
        adapter->dev_stats.tx_timeout++;
        u64_stats_update_end(&adapter->syncp);

        netif_err(adapter, tx_err, dev, "Transmit time out\n");
}
static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu)
{
        int i;

        for (i = 0; i < adapter->num_queues; i++)
                adapter->rx_ring[i].mtu = mtu;
}
static int ena_change_mtu(struct net_device *dev, int new_mtu)
{
        struct ena_adapter *adapter = netdev_priv(dev);
        int ret;

        ret = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
        if (!ret) {
                netif_dbg(adapter, drv, dev, "set MTU to %d\n", new_mtu);
                update_rx_ring_mtu(adapter, new_mtu);
                dev->mtu = new_mtu;
        } else {
                netif_err(adapter, drv, dev, "Failed to set MTU to %d\n",
                          new_mtu);
        }

        return ret;
}
static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
{
#ifdef CONFIG_RFS_ACCEL
        u32 i;
        int rc;

        adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_queues);
        if (!adapter->netdev->rx_cpu_rmap)
                return -ENOMEM;

        for (i = 0; i < adapter->num_queues; i++) {
                int irq_idx = ENA_IO_IRQ_IDX(i);

                rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
                                      pci_irq_vector(adapter->pdev, irq_idx));
                if (rc) {
                        free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
                        adapter->netdev->rx_cpu_rmap = NULL;
                        return rc;
                }
        }
#endif /* CONFIG_RFS_ACCEL */
        return 0;
}
static void ena_init_io_rings_common(struct ena_adapter *adapter,
                                     struct ena_ring *ring, u16 qid)
{
        ring->qid = qid;
        ring->pdev = adapter->pdev;
        ring->dev = &adapter->pdev->dev;
        ring->netdev = adapter->netdev;
        ring->napi = &adapter->ena_napi[qid].napi;
        ring->adapter = adapter;
        ring->ena_dev = adapter->ena_dev;
        ring->per_napi_packets = 0;
        ring->per_napi_bytes = 0;
        ring->cpu = 0;
        ring->first_interrupt = false;
        ring->no_interrupt_event_cnt = 0;
        u64_stats_init(&ring->syncp);
}
static void ena_init_io_rings(struct ena_adapter *adapter)
{
        struct ena_com_dev *ena_dev;
        struct ena_ring *txr, *rxr;
        int i;

        ena_dev = adapter->ena_dev;

        for (i = 0; i < adapter->num_queues; i++) {
                txr = &adapter->tx_ring[i];
                rxr = &adapter->rx_ring[i];

                /* TX/RX common ring state */
                ena_init_io_rings_common(adapter, txr, i);
                ena_init_io_rings_common(adapter, rxr, i);

                /* TX specific ring state */
                txr->ring_size = adapter->tx_ring_size;
                txr->tx_max_header_size = ena_dev->tx_max_header_size;
                txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
                txr->sgl_size = adapter->max_tx_sgl_size;
                txr->smoothed_interval =
                        ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);

                /* RX specific ring state */
                rxr->ring_size = adapter->rx_ring_size;
                rxr->rx_copybreak = adapter->rx_copybreak;
                rxr->sgl_size = adapter->max_rx_sgl_size;
                rxr->smoothed_interval =
                        ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
                rxr->empty_rx_queue = 0;
        }
}
/* ena_setup_tx_resources - allocate I/O Tx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
{
        struct ena_ring *tx_ring = &adapter->tx_ring[qid];
        struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
        int size, i, node;

        if (tx_ring->tx_buffer_info) {
                netif_err(adapter, ifup,
                          adapter->netdev, "tx_buffer_info info is not NULL");
                return -EEXIST;
        }

        size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
        node = cpu_to_node(ena_irq->cpu);

        tx_ring->tx_buffer_info = vzalloc_node(size, node);
        if (!tx_ring->tx_buffer_info) {
                tx_ring->tx_buffer_info = vzalloc(size);
                if (!tx_ring->tx_buffer_info)
                        return -ENOMEM;
        }

        size = sizeof(u16) * tx_ring->ring_size;
        tx_ring->free_tx_ids = vzalloc_node(size, node);
        if (!tx_ring->free_tx_ids) {
                tx_ring->free_tx_ids = vzalloc(size);
                if (!tx_ring->free_tx_ids) {
                        vfree(tx_ring->tx_buffer_info);
                        return -ENOMEM;
                }
        }

        /* Req id ring for TX out of order completions */
        for (i = 0; i < tx_ring->ring_size; i++)
                tx_ring->free_tx_ids[i] = i;

        /* Reset tx statistics */
        memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats));

        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;
        tx_ring->cpu = ena_irq->cpu;
        return 0;
}
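
/* The vzalloc_node()/vzalloc() pattern above first tries to place the
 * per-queue bookkeeping arrays on the NUMA node of the queue's IRQ CPU
 * and only then falls back to an allocation on any node, so poor NUMA
 * placement degrades locality instead of failing queue bring-up.
 */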
/* ena_free_tx_resources - Free I/O Tx Resources per Queue
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all transmit software resources
 */
static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
{
        struct ena_ring *tx_ring = &adapter->tx_ring[qid];

        vfree(tx_ring->tx_buffer_info);
        tx_ring->tx_buffer_info = NULL;

        vfree(tx_ring->free_tx_ids);
        tx_ring->free_tx_ids = NULL;
}
/* ena_setup_all_tx_resources - allocate I/O Tx queues resources for All queues
 * @adapter: private structure
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_all_tx_resources(struct ena_adapter *adapter)
{
        int i, rc = 0;

        for (i = 0; i < adapter->num_queues; i++) {
                rc = ena_setup_tx_resources(adapter, i);
                if (rc)
                        goto err_setup_tx;
        }

        return 0;

err_setup_tx:

        netif_err(adapter, ifup, adapter->netdev,
                  "Tx queue %d: allocation failed\n", i);

        /* rewind the index freeing the rings as we go */
        while (i--)
                ena_free_tx_resources(adapter, i);
        return rc;
}
/* ena_free_all_io_tx_resources - Free I/O Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 */
static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
{
        int i;

        for (i = 0; i < adapter->num_queues; i++)
                ena_free_tx_resources(adapter, i);
}
static inline int validate_rx_req_id(struct ena_ring *rx_ring, u16 req_id)
{
        if (likely(req_id < rx_ring->ring_size))
                return 0;

        netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
                  "Invalid rx req_id: %hu\n", req_id);

        u64_stats_update_begin(&rx_ring->syncp);
        rx_ring->rx_stats.bad_req_id++;
        u64_stats_update_end(&rx_ring->syncp);

        /* Trigger device reset */
        rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
        set_bit(ENA_FLAG_TRIGGER_RESET, &rx_ring->adapter->flags);
        return -EFAULT;
}
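
/* free_rx_ids is a ring of free request ids: because completions can
 * arrive out of order, buffer slots are addressed indirectly through
 * req_id rather than by ring position. An out-of-range req_id means
 * driver and device state have diverged, so the only safe recovery is
 * a full device reset.
 */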
/* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Returns 0 on success, negative on failure
 */
static int ena_setup_rx_resources(struct ena_adapter *adapter,
                                  u32 qid)
{
        struct ena_ring *rx_ring = &adapter->rx_ring[qid];
        struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
        int size, node, i;

        if (rx_ring->rx_buffer_info) {
                netif_err(adapter, ifup, adapter->netdev,
                          "rx_buffer_info is not NULL");
                return -EEXIST;
        }

        /* alloc extra element so in rx path
         * we can always prefetch rx_info + 1
         */
        size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1);
        node = cpu_to_node(ena_irq->cpu);

        rx_ring->rx_buffer_info = vzalloc_node(size, node);
        if (!rx_ring->rx_buffer_info) {
                rx_ring->rx_buffer_info = vzalloc(size);
                if (!rx_ring->rx_buffer_info)
                        return -ENOMEM;
        }

        size = sizeof(u16) * rx_ring->ring_size;
        rx_ring->free_rx_ids = vzalloc_node(size, node);
        if (!rx_ring->free_rx_ids) {
                rx_ring->free_rx_ids = vzalloc(size);
                if (!rx_ring->free_rx_ids) {
                        vfree(rx_ring->rx_buffer_info);
                        return -ENOMEM;
                }
        }

        /* Req id ring for receiving RX pkts out of order */
        for (i = 0; i < rx_ring->ring_size; i++)
                rx_ring->free_rx_ids[i] = i;

        /* Reset rx statistics */
        memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats));

        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
        rx_ring->cpu = ena_irq->cpu;

        return 0;
}
/* ena_free_rx_resources - Free I/O Rx Resources
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all receive software resources
 */
static void ena_free_rx_resources(struct ena_adapter *adapter,
                                  u32 qid)
{
        struct ena_ring *rx_ring = &adapter->rx_ring[qid];

        vfree(rx_ring->rx_buffer_info);
        rx_ring->rx_buffer_info = NULL;

        vfree(rx_ring->free_rx_ids);
        rx_ring->free_rx_ids = NULL;
}
/* ena_setup_all_rx_resources - allocate I/O Rx queues resources for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_all_rx_resources(struct ena_adapter *adapter)
{
        int i, rc = 0;

        for (i = 0; i < adapter->num_queues; i++) {
                rc = ena_setup_rx_resources(adapter, i);
                if (rc)
                        goto err_setup_rx;
        }

        return 0;

err_setup_rx:

        netif_err(adapter, ifup, adapter->netdev,
                  "Rx queue %d: allocation failed\n", i);

        /* rewind the index freeing the rings as we go */
        while (i--)
                ena_free_rx_resources(adapter, i);
        return rc;
}
/* ena_free_all_io_rx_resources - Free I/O Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 */
static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
{
        int i;

        for (i = 0; i < adapter->num_queues; i++)
                ena_free_rx_resources(adapter, i);
}
static inline int ena_alloc_rx_page(struct ena_ring *rx_ring,
                                    struct ena_rx_buffer *rx_info, gfp_t gfp)
{
        struct ena_com_buf *ena_buf;
        struct page *page;
        dma_addr_t dma;

        /* if previous allocated page is not used */
        if (unlikely(rx_info->page))
                return 0;

        page = alloc_page(gfp);
        if (unlikely(!page)) {
                u64_stats_update_begin(&rx_ring->syncp);
                rx_ring->rx_stats.page_alloc_fail++;
                u64_stats_update_end(&rx_ring->syncp);
                return -ENOMEM;
        }

        dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE,
                           DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
                u64_stats_update_begin(&rx_ring->syncp);
                rx_ring->rx_stats.dma_mapping_err++;
                u64_stats_update_end(&rx_ring->syncp);

                __free_page(page);
                return -EIO;
        }
        netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
                  "alloc page %p, rx_info %p\n", page, rx_info);

        rx_info->page = page;
        rx_info->page_offset = 0;
        ena_buf = &rx_info->ena_buf;
        ena_buf->paddr = dma;
        ena_buf->len = PAGE_SIZE;

        return 0;
}
static void ena_free_rx_page(struct ena_ring *rx_ring,
                             struct ena_rx_buffer *rx_info)
{
        struct page *page = rx_info->page;
        struct ena_com_buf *ena_buf = &rx_info->ena_buf;

        if (unlikely(!page)) {
                netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
                           "Trying to free unallocated buffer\n");
                return;
        }

        dma_unmap_page(rx_ring->dev, ena_buf->paddr, PAGE_SIZE,
                       DMA_FROM_DEVICE);

        __free_page(page);
        rx_info->page = NULL;
}
static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
{
        u16 next_to_use, req_id;
        u32 i;
        int rc;

        next_to_use = rx_ring->next_to_use;

        for (i = 0; i < num; i++) {
                struct ena_rx_buffer *rx_info;

                req_id = rx_ring->free_rx_ids[next_to_use];
                rc = validate_rx_req_id(rx_ring, req_id);
                if (unlikely(rc < 0))
                        break;

                rx_info = &rx_ring->rx_buffer_info[req_id];

                rc = ena_alloc_rx_page(rx_ring, rx_info,
                                       GFP_ATOMIC | __GFP_COMP);
                if (unlikely(rc < 0)) {
                        netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
                                   "failed to alloc buffer for rx queue %d\n",
                                   rx_ring->qid);
                        break;
                }
                rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
                                                &rx_info->ena_buf,
                                                req_id);
                if (unlikely(rc)) {
                        netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
                                   "failed to add buffer for rx queue %d\n",
                                   rx_ring->qid);
                        break;
                }
                next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
                                                   rx_ring->ring_size);
        }

        if (unlikely(i < num)) {
                u64_stats_update_begin(&rx_ring->syncp);
                rx_ring->rx_stats.refil_partial++;
                u64_stats_update_end(&rx_ring->syncp);
                netdev_warn(rx_ring->netdev,
                            "refilled rx qid %d with only %d buffers (from %d)\n",
                            rx_ring->qid, i, num);
        }

        /* Add memory barrier to make sure the desc were written before
         * issue a doorbell
         */
        wmb();
        ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq, true);

        rx_ring->next_to_use = next_to_use;

        return i;
}
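
/* ena_refill_rx_bufs() returns the number of buffers actually posted,
 * which callers compare against the requested count to detect partial
 * refills. Callers request at most ring_size - 1 buffers; keeping one
 * slot unused is presumably the usual ring-buffer trick that keeps a
 * full queue distinguishable from an empty one.
 */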
static void ena_free_rx_bufs(struct ena_adapter *adapter,
                             u32 qid)
{
        struct ena_ring *rx_ring = &adapter->rx_ring[qid];
        u32 i;

        for (i = 0; i < rx_ring->ring_size; i++) {
                struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];

                if (rx_info->page)
                        ena_free_rx_page(rx_ring, rx_info);
        }
}
/* ena_refill_all_rx_bufs - allocate all queues Rx buffers
 * @adapter: board private structure
 */
static void ena_refill_all_rx_bufs(struct ena_adapter *adapter)
{
        struct ena_ring *rx_ring;
        int i, rc, bufs_num;

        for (i = 0; i < adapter->num_queues; i++) {
                rx_ring = &adapter->rx_ring[i];
                bufs_num = rx_ring->ring_size - 1;
                rc = ena_refill_rx_bufs(rx_ring, bufs_num);

                if (unlikely(rc != bufs_num))
                        netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
                                   "refilling Queue %d failed. allocated %d buffers from: %d\n",
                                   i, rc, bufs_num);
        }
}
static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
{
        int i;

        for (i = 0; i < adapter->num_queues; i++)
                ena_free_rx_bufs(adapter, i);
}
/* ena_free_tx_bufs - Free Tx Buffers per Queue
 * @tx_ring: TX ring for which buffers be freed
 */
static void ena_free_tx_bufs(struct ena_ring *tx_ring)
{
        bool print_once = true;
        u32 i;

        for (i = 0; i < tx_ring->ring_size; i++) {
                struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
                struct ena_com_buf *ena_buf;
                int nr_frags;
                int j;

                if (!tx_info->skb)
                        continue;

                if (print_once) {
                        netdev_notice(tx_ring->netdev,
                                      "free uncompleted tx skb qid %d idx 0x%x\n",
                                      tx_ring->qid, i);
                        print_once = false;
                } else {
                        netdev_dbg(tx_ring->netdev,
                                   "free uncompleted tx skb qid %d idx 0x%x\n",
                                   tx_ring->qid, i);
                }

                ena_buf = tx_info->bufs;
                dma_unmap_single(tx_ring->dev,
                                 ena_buf->paddr,
                                 ena_buf->len,
                                 DMA_TO_DEVICE);

                /* unmap remaining mapped pages */
                nr_frags = tx_info->num_of_bufs - 1;
                for (j = 0; j < nr_frags; j++) {
                        ena_buf++;
                        dma_unmap_page(tx_ring->dev,
                                       ena_buf->paddr,
                                       ena_buf->len,
                                       DMA_TO_DEVICE);
                }

                dev_kfree_skb_any(tx_info->skb);
        }
        netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
                                                  tx_ring->qid));
}
static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
{
        struct ena_ring *tx_ring;
        int i;

        for (i = 0; i < adapter->num_queues; i++) {
                tx_ring = &adapter->tx_ring[i];
                ena_free_tx_bufs(tx_ring);
        }
}
static void ena_destroy_all_tx_queues(struct ena_adapter *adapter)
{
        u16 ena_qid;
        int i;

        for (i = 0; i < adapter->num_queues; i++) {
                ena_qid = ENA_IO_TXQ_IDX(i);
                ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
        }
}
static void ena_destroy_all_rx_queues(struct ena_adapter *adapter)
{
        u16 ena_qid;
        int i;

        for (i = 0; i < adapter->num_queues; i++) {
                ena_qid = ENA_IO_RXQ_IDX(i);
                ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
        }
}
static void ena_destroy_all_io_queues(struct ena_adapter *adapter)
{
        ena_destroy_all_tx_queues(adapter);
        ena_destroy_all_rx_queues(adapter);
}
static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
{
        struct ena_tx_buffer *tx_info = NULL;

        if (likely(req_id < tx_ring->ring_size)) {
                tx_info = &tx_ring->tx_buffer_info[req_id];
                if (likely(tx_info->skb))
                        return 0;
        }

        if (tx_info)
                netif_err(tx_ring->adapter, tx_done, tx_ring->netdev,
                          "tx_info doesn't have valid skb\n");
        else
                netif_err(tx_ring->adapter, tx_done, tx_ring->netdev,
                          "Invalid req_id: %hu\n", req_id);

        u64_stats_update_begin(&tx_ring->syncp);
        tx_ring->tx_stats.bad_req_id++;
        u64_stats_update_end(&tx_ring->syncp);

        /* Trigger device reset */
        tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
        set_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags);
        return -EFAULT;
}
static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
{
        struct netdev_queue *txq;
        bool above_thresh;
        u32 tx_bytes = 0;
        u32 total_done = 0;
        u16 next_to_clean;
        u16 req_id;
        int tx_pkts = 0;
        int rc;

        next_to_clean = tx_ring->next_to_clean;
        txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid);

        while (tx_pkts < budget) {
                struct ena_tx_buffer *tx_info;
                struct sk_buff *skb;
                struct ena_com_buf *ena_buf;
                int i, nr_frags;

                rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
                                                &req_id);
                if (rc)
                        break;

                rc = validate_tx_req_id(tx_ring, req_id);
                if (rc)
                        break;

                tx_info = &tx_ring->tx_buffer_info[req_id];
                skb = tx_info->skb;

                /* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
                prefetch(&skb->end);

                tx_info->skb = NULL;
                tx_info->last_jiffies = 0;

                if (likely(tx_info->num_of_bufs != 0)) {
                        ena_buf = tx_info->bufs;

                        dma_unmap_single(tx_ring->dev,
                                         dma_unmap_addr(ena_buf, paddr),
                                         dma_unmap_len(ena_buf, len),
                                         DMA_TO_DEVICE);

                        /* unmap remaining mapped pages */
                        nr_frags = tx_info->num_of_bufs - 1;
                        for (i = 0; i < nr_frags; i++) {
                                ena_buf++;
                                dma_unmap_page(tx_ring->dev,
                                               dma_unmap_addr(ena_buf, paddr),
                                               dma_unmap_len(ena_buf, len),
                                               DMA_TO_DEVICE);
                        }
                }

                netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
                          "tx_poll: q %d skb %p completed\n", tx_ring->qid,
                          skb);

                tx_bytes += skb->len;
                dev_kfree_skb(skb);
                tx_pkts++;
                total_done += tx_info->tx_descs;

                tx_ring->free_tx_ids[next_to_clean] = req_id;
                next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
                                                     tx_ring->ring_size);
        }

        tx_ring->next_to_clean = next_to_clean;
        ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
        ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);

        netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);

        netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
                  "tx_poll: q %d done. total pkts: %d\n",
                  tx_ring->qid, tx_pkts);

        /* need to make the rings circular update visible to
         * ena_start_xmit() before checking for netif_queue_stopped().
         */
        smp_mb();

        above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) >
                ENA_TX_WAKEUP_THRESH;
        if (unlikely(netif_tx_queue_stopped(txq) && above_thresh)) {
                __netif_tx_lock(txq, smp_processor_id());
                above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) >
                        ENA_TX_WAKEUP_THRESH;
                if (netif_tx_queue_stopped(txq) && above_thresh) {
                        netif_tx_wake_queue(txq);
                        u64_stats_update_begin(&tx_ring->syncp);
                        tx_ring->tx_stats.queue_wakeup++;
                        u64_stats_update_end(&tx_ring->syncp);
                }
                __netif_tx_unlock(txq);
        }

        tx_ring->per_napi_bytes += tx_bytes;
        tx_ring->per_napi_packets += tx_pkts;

        return tx_pkts;
}
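
/* The double-check of the wakeup condition under __netif_tx_lock()
 * closes a race with ena_start_xmit(): without it, the xmit path could
 * stop the queue right after this function saw there was room, leaving
 * the queue stopped with no further completion to ever wake it.
 */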
static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, bool frags)
{
        struct sk_buff *skb;

        if (frags)
                skb = napi_get_frags(rx_ring->napi);
        else
                skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
                                                rx_ring->rx_copybreak);

        if (unlikely(!skb)) {
                u64_stats_update_begin(&rx_ring->syncp);
                rx_ring->rx_stats.skb_alloc_fail++;
                u64_stats_update_end(&rx_ring->syncp);
                netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
                          "Failed to allocate skb. frags: %d\n", frags);
        }

        return skb;
}
static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
                                  struct ena_com_rx_buf_info *ena_bufs,
                                  u32 descs,
                                  u16 *next_to_clean)
{
        struct sk_buff *skb;
        struct ena_rx_buffer *rx_info;
        u16 len, req_id, buf = 0;
        void *va;

        len = ena_bufs[buf].len;
        req_id = ena_bufs[buf].req_id;
        rx_info = &rx_ring->rx_buffer_info[req_id];

        if (unlikely(!rx_info->page)) {
                netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
                          "Page is NULL\n");
                return NULL;
        }

        netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
                  "rx_info %p page %p\n",
                  rx_info, rx_info->page);

        /* save virt address of first buffer */
        va = page_address(rx_info->page) + rx_info->page_offset;
        prefetch(va + NET_IP_ALIGN);

        if (len <= rx_ring->rx_copybreak) {
                skb = ena_alloc_skb(rx_ring, false);
                if (unlikely(!skb))
                        return NULL;

                netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
                          "rx allocated small packet. len %d. data_len %d\n",
                          skb->len, skb->data_len);

                /* sync this buffer for CPU use */
                dma_sync_single_for_cpu(rx_ring->dev,
                                        dma_unmap_addr(&rx_info->ena_buf, paddr),
                                        len,
                                        DMA_FROM_DEVICE);
                skb_copy_to_linear_data(skb, va, len);
                dma_sync_single_for_device(rx_ring->dev,
                                           dma_unmap_addr(&rx_info->ena_buf, paddr),
                                           len,
                                           DMA_FROM_DEVICE);

                skb_put(skb, len);
                skb->protocol = eth_type_trans(skb, rx_ring->netdev);
                rx_ring->free_rx_ids[*next_to_clean] = req_id;
                *next_to_clean = ENA_RX_RING_IDX_ADD(*next_to_clean, descs,
                                                     rx_ring->ring_size);
                return skb;
        }

        skb = ena_alloc_skb(rx_ring, true);
        if (unlikely(!skb))
                return NULL;

        do {
                dma_unmap_page(rx_ring->dev,
                               dma_unmap_addr(&rx_info->ena_buf, paddr),
                               PAGE_SIZE, DMA_FROM_DEVICE);

                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
                                rx_info->page_offset, len, PAGE_SIZE);

                netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
                          "rx skb updated. len %d. data_len %d\n",
                          skb->len, skb->data_len);

                rx_info->page = NULL;

                rx_ring->free_rx_ids[*next_to_clean] = req_id;
                *next_to_clean =
                        ENA_RX_RING_IDX_NEXT(*next_to_clean,
                                             rx_ring->ring_size);
                if (likely(--descs == 0))
                        break;

                buf++;
                len = ena_bufs[buf].len;
                req_id = ena_bufs[buf].req_id;
                rx_info = &rx_ring->rx_buffer_info[req_id];
        } while (1);

        return skb;
}
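
/* Two receive strategies are used above: packets up to rx_copybreak
 * are copied into a freshly allocated linear skb so the DMA page can
 * be reposted immediately, while larger packets unmap the page and
 * attach it to the skb as a frag (zero copy), paying page churn only
 * where a memcpy would be expensive.
 */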
/* ena_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: structure containing adapter specific data
 * @ena_rx_ctx: received packet context/metadata
 * @skb: skb currently being received and modified
 */
static inline void ena_rx_checksum(struct ena_ring *rx_ring,
                                   struct ena_com_rx_ctx *ena_rx_ctx,
                                   struct sk_buff *skb)
{
        /* Rx csum disabled */
        if (unlikely(!(rx_ring->netdev->features & NETIF_F_RXCSUM))) {
                skb->ip_summed = CHECKSUM_NONE;
                return;
        }

        /* For fragmented packets the checksum isn't valid */
        if (ena_rx_ctx->frag) {
                skb->ip_summed = CHECKSUM_NONE;
                return;
        }

        /* if IP and error */
        if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) &&
                     (ena_rx_ctx->l3_csum_err))) {
                /* ipv4 checksum error */
                skb->ip_summed = CHECKSUM_NONE;
                u64_stats_update_begin(&rx_ring->syncp);
                rx_ring->rx_stats.bad_csum++;
                u64_stats_update_end(&rx_ring->syncp);
                netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
                          "RX IPv4 header checksum error\n");
                return;
        }

        /* if TCP/UDP */
        if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
                   (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) {
                if (unlikely(ena_rx_ctx->l4_csum_err)) {
                        /* TCP/UDP checksum error */
                        u64_stats_update_begin(&rx_ring->syncp);
                        rx_ring->rx_stats.bad_csum++;
                        u64_stats_update_end(&rx_ring->syncp);
                        netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
                                  "RX L4 checksum error\n");
                        skb->ip_summed = CHECKSUM_NONE;
                        return;
                }

                skb->ip_summed = CHECKSUM_UNNECESSARY;
        }
}
static void ena_set_rx_hash(struct ena_ring *rx_ring,
                            struct ena_com_rx_ctx *ena_rx_ctx,
                            struct sk_buff *skb)
{
        enum pkt_hash_types hash_type;

        if (likely(rx_ring->netdev->features & NETIF_F_RXHASH)) {
                if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
                           (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)))
                        hash_type = PKT_HASH_TYPE_L4;
                else
                        hash_type = PKT_HASH_TYPE_NONE;

                /* Override hash type if the packet is fragmented */
                if (ena_rx_ctx->frag)
                        hash_type = PKT_HASH_TYPE_NONE;

                skb_set_hash(skb, ena_rx_ctx->hash, hash_type);
        }
}
/* ena_clean_rx_irq - Cleanup RX irq
 * @rx_ring: RX ring to clean
 * @napi: napi handler
 * @budget: how many packets driver is allowed to clean
 *
 * Returns the number of cleaned buffers.
 */
static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
                            u32 budget)
{
        u16 next_to_clean = rx_ring->next_to_clean;
        u32 res_budget, work_done;

        struct ena_com_rx_ctx ena_rx_ctx;
        struct ena_adapter *adapter;
        struct sk_buff *skb;
        int refill_required;
        int refill_threshold;
        int rc = 0;
        int total_len = 0;
        int rx_copybreak_pkt = 0;
        int i;

        netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
                  "%s qid %d\n", __func__, rx_ring->qid);
        res_budget = budget;

        do {
                ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
                ena_rx_ctx.max_bufs = rx_ring->sgl_size;
                ena_rx_ctx.descs = 0;
                rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
                                    rx_ring->ena_com_io_sq,
                                    &ena_rx_ctx);
                if (unlikely(rc))
                        goto error;

                if (unlikely(ena_rx_ctx.descs == 0))
                        break;

                netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
                          "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
                          rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
                          ena_rx_ctx.l4_proto, ena_rx_ctx.hash);

                /* allocate skb and fill it */
                skb = ena_rx_skb(rx_ring, rx_ring->ena_bufs, ena_rx_ctx.descs,
                                 &next_to_clean);

                /* exit if we failed to retrieve a buffer */
                if (unlikely(!skb)) {
                        for (i = 0; i < ena_rx_ctx.descs; i++) {
                                rx_ring->free_rx_ids[next_to_clean] =
                                        rx_ring->ena_bufs[i].req_id;
                                next_to_clean =
                                        ENA_RX_RING_IDX_NEXT(next_to_clean,
                                                             rx_ring->ring_size);
                        }
                        break;
                }

                ena_rx_checksum(rx_ring, &ena_rx_ctx, skb);

                ena_set_rx_hash(rx_ring, &ena_rx_ctx, skb);

                skb_record_rx_queue(skb, rx_ring->qid);

                if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak) {
                        total_len += rx_ring->ena_bufs[0].len;
                        rx_copybreak_pkt++;
                        napi_gro_receive(napi, skb);
                } else {
                        total_len += skb->len;
                        napi_gro_frags(napi);
                }

                res_budget--;
        } while (likely(res_budget));

        work_done = budget - res_budget;
        rx_ring->per_napi_bytes += total_len;
        rx_ring->per_napi_packets += work_done;
        u64_stats_update_begin(&rx_ring->syncp);
        rx_ring->rx_stats.bytes += total_len;
        rx_ring->rx_stats.cnt += work_done;
        rx_ring->rx_stats.rx_copybreak_pkt += rx_copybreak_pkt;
        u64_stats_update_end(&rx_ring->syncp);

        rx_ring->next_to_clean = next_to_clean;

        refill_required = ena_com_sq_empty_space(rx_ring->ena_com_io_sq);
        refill_threshold = rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER;

        /* Optimization, try to batch new rx buffers */
        if (refill_required > refill_threshold) {
                ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
                ena_refill_rx_bufs(rx_ring, refill_required);
        }

        return work_done;

error:
        adapter = netdev_priv(rx_ring->netdev);

        u64_stats_update_begin(&rx_ring->syncp);
        rx_ring->rx_stats.bad_desc_num++;
        u64_stats_update_end(&rx_ring->syncp);

        /* Too many desc from the device. Trigger reset */
        adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
        set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);

        return 0;
}
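
/* Refilling is deliberately batched: buffers are reposted only once
 * the free space in the Rx submission queue crosses
 * ring_size / ENA_RX_REFILL_THRESH_DIVIDER, amortizing the doorbell
 * write over many descriptors instead of ringing per packet.
 */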
static inline void ena_adjust_intr_moderation(struct ena_ring *rx_ring,
                                              struct ena_ring *tx_ring)
{
        /* We apply adaptive moderation on Rx path only.
         * Tx uses static interrupt moderation.
         */
        ena_com_calculate_interrupt_delay(rx_ring->ena_dev,
                                          rx_ring->per_napi_packets,
                                          rx_ring->per_napi_bytes,
                                          &rx_ring->smoothed_interval,
                                          &rx_ring->moder_tbl_idx);

        /* Reset per napi packets/bytes */
        tx_ring->per_napi_packets = 0;
        tx_ring->per_napi_bytes = 0;
        rx_ring->per_napi_packets = 0;
        rx_ring->per_napi_bytes = 0;
}
static inline void ena_unmask_interrupt(struct ena_ring *tx_ring,
                                        struct ena_ring *rx_ring)
{
        struct ena_eth_io_intr_reg intr_reg;

        /* Update intr register: rx intr delay,
         * tx intr delay and interrupt unmask
         */
        ena_com_update_intr_reg(&intr_reg,
                                rx_ring->smoothed_interval,
                                tx_ring->smoothed_interval,
                                true);

        /* It is a shared MSI-X.
         * Tx and Rx CQ have pointer to it.
         * So we use one of them to reach the intr reg
         */
        ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg);
}
static inline void ena_update_ring_numa_node(struct ena_ring *tx_ring,
                                             struct ena_ring *rx_ring)
{
        int cpu = get_cpu();
        int numa_node;

        /* Check only one ring since the 2 rings are running on the same cpu */
        if (likely(tx_ring->cpu == cpu))
                goto out;

        numa_node = cpu_to_node(cpu);
        put_cpu();

        if (numa_node != NUMA_NO_NODE) {
                ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node);
                ena_com_update_numa_node(rx_ring->ena_com_io_cq, numa_node);
        }

        tx_ring->cpu = cpu;
        rx_ring->cpu = cpu;

        return;
out:
        put_cpu();
}
static int ena_io_poll(struct napi_struct *napi, int budget)
{
        struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
        struct ena_ring *tx_ring, *rx_ring;

        u32 tx_work_done;
        u32 rx_work_done;
        int tx_budget;
        int napi_comp_call = 0;
        int ret;

        tx_ring = ena_napi->tx_ring;
        rx_ring = ena_napi->rx_ring;

        tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER;

        if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
            test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) {
                napi_complete_done(napi, 0);
                return 0;
        }

        tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget);
        rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);

        /* If the device is about to reset or down, avoid unmask
         * the interrupt and return 0 so NAPI won't reschedule
         */
        if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
                     test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags))) {
                napi_complete_done(napi, 0);
                ret = 0;

        } else if ((budget > rx_work_done) && (tx_budget > tx_work_done)) {
                napi_comp_call = 1;

                /* Update numa and unmask the interrupt only when schedule
                 * from the interrupt context (vs from sk_busy_loop)
                 */
                if (napi_complete_done(napi, rx_work_done)) {
                        /* Tx and Rx share the same interrupt vector */
                        if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
                                ena_adjust_intr_moderation(rx_ring, tx_ring);

                        ena_unmask_interrupt(tx_ring, rx_ring);
                }

                ena_update_ring_numa_node(tx_ring, rx_ring);

                ret = rx_work_done;
        } else {
                ret = budget;
        }

        u64_stats_update_begin(&tx_ring->syncp);
        tx_ring->tx_stats.napi_comp += napi_comp_call;
        tx_ring->tx_stats.tx_poll++;
        u64_stats_update_end(&tx_ring->syncp);

        return ret;
}
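
/* A single NAPI context services a Tx/Rx queue pair, mirroring the
 * shared MSI-X vector. The Tx budget is derived from the ring size
 * (ring_size / ENA_TX_POLL_BUDGET_DIVIDER) rather than from the NAPI
 * budget, which is consumed by the Rx path alone.
 */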
static irqreturn_t ena_intr_msix_mgmnt(int irq, void *data)
{
        struct ena_adapter *adapter = (struct ena_adapter *)data;

        ena_com_admin_q_comp_intr_handler(adapter->ena_dev);

        /* Don't call the aenq handler before probe is done */
        if (likely(test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)))
                ena_com_aenq_intr_handler(adapter->ena_dev, data);

        return IRQ_HANDLED;
}
/* ena_intr_msix_io - MSI-X Interrupt Handler for Tx/Rx
 * @irq: interrupt number
 * @data: pointer to a network interface private napi device structure
 */
static irqreturn_t ena_intr_msix_io(int irq, void *data)
{
        struct ena_napi *ena_napi = data;

        ena_napi->tx_ring->first_interrupt = true;
        ena_napi->rx_ring->first_interrupt = true;

        napi_schedule_irqoff(&ena_napi->napi);

        return IRQ_HANDLED;
}
/* Reserve a single MSI-X vector for management (admin + aenq).
 * plus reserve one vector for each potential io queue.
 * the number of potential io queues is the minimum of what the device
 * supports and the number of vCPUs.
 */
static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
{
        int msix_vecs, irq_cnt;

        if (test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
                netif_err(adapter, probe, adapter->netdev,
                          "Error, MSI-X is already enabled\n");
                return -EPERM;
        }

        /* Reserved the max msix vectors we might need */
        msix_vecs = ENA_MAX_MSIX_VEC(num_queues);

        netif_dbg(adapter, probe, adapter->netdev,
                  "trying to enable MSI-X, vectors %d\n", msix_vecs);

        irq_cnt = pci_alloc_irq_vectors(adapter->pdev, ENA_MIN_MSIX_VEC,
                                        msix_vecs, PCI_IRQ_MSIX);

        if (irq_cnt < 0) {
                netif_err(adapter, probe, adapter->netdev,
                          "Failed to enable MSI-X. irq_cnt %d\n", irq_cnt);
                return -ENOSPC;
        }

        if (irq_cnt != msix_vecs) {
                netif_notice(adapter, probe, adapter->netdev,
                             "enable only %d MSI-X (out of %d), reduce the number of queues\n",
                             irq_cnt, msix_vecs);
                adapter->num_queues = irq_cnt - ENA_ADMIN_MSIX_VEC;
        }

        if (ena_init_rx_cpu_rmap(adapter))
                netif_warn(adapter, probe, adapter->netdev,
                           "Failed to map IRQs to CPUs\n");

        adapter->msix_vecs = irq_cnt;
        set_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags);

        return 0;
}
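
/* Vector layout: vector 0 (ENA_MGMNT_IRQ_IDX) is dedicated to admin
 * completions and asynchronous event notifications, and each I/O
 * queue pair gets one of the remaining vectors. If the PCI core
 * grants fewer vectors than requested, num_queues is shrunk to match.
 */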
static void ena_setup_mgmnt_intr(struct ena_adapter *adapter)
{
        u32 cpu;

        snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name,
                 ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s",
                 pci_name(adapter->pdev));
        adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler =
                ena_intr_msix_mgmnt;
        adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter;
        adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector =
                pci_irq_vector(adapter->pdev, ENA_MGMNT_IRQ_IDX);
        cpu = cpumask_first(cpu_online_mask);
        adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].cpu = cpu;
        cpumask_set_cpu(cpu,
                        &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].affinity_hint_mask);
}
static void ena_setup_io_intr(struct ena_adapter *adapter)
{
        struct net_device *netdev;
        int irq_idx, i, cpu;

        netdev = adapter->netdev;

        for (i = 0; i < adapter->num_queues; i++) {
                irq_idx = ENA_IO_IRQ_IDX(i);
                cpu = i % num_online_cpus();

                snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE,
                         "%s-Tx-Rx-%d", netdev->name, i);
                adapter->irq_tbl[irq_idx].handler = ena_intr_msix_io;
                adapter->irq_tbl[irq_idx].data = &adapter->ena_napi[i];
                adapter->irq_tbl[irq_idx].vector =
                        pci_irq_vector(adapter->pdev, irq_idx);
                adapter->irq_tbl[irq_idx].cpu = cpu;

                cpumask_set_cpu(cpu,
                                &adapter->irq_tbl[irq_idx].affinity_hint_mask);
        }
}
static int ena_request_mgmnt_irq(struct ena_adapter *adapter)
{
        unsigned long flags = 0;
        struct ena_irq *irq;
        int rc;

        irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
        rc = request_irq(irq->vector, irq->handler, flags, irq->name,
                         irq->data);
        if (rc) {
                netif_err(adapter, probe, adapter->netdev,
                          "failed to request admin irq\n");
                return rc;
        }

        netif_dbg(adapter, probe, adapter->netdev,
                  "set affinity hint of mgmnt irq.to 0x%lx (irq vector: %d)\n",
                  irq->affinity_hint_mask.bits[0], irq->vector);

        irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);

        return rc;
}
static int ena_request_io_irq(struct ena_adapter *adapter)
{
        unsigned long flags = 0;
        struct ena_irq *irq;
        int rc = 0, i, k;

        if (!test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
                netif_err(adapter, ifup, adapter->netdev,
                          "Failed to request I/O IRQ: MSI-X is not enabled\n");
                return -EINVAL;
        }

        for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
                irq = &adapter->irq_tbl[i];
                rc = request_irq(irq->vector, irq->handler, flags, irq->name,
                                 irq->data);
                if (rc) {
                        netif_err(adapter, ifup, adapter->netdev,
                                  "Failed to request I/O IRQ. index %d rc %d\n",
                                  i, rc);
                        goto err;
                }

                netif_dbg(adapter, ifup, adapter->netdev,
                          "set affinity hint of irq. index %d to 0x%lx (irq vector: %d)\n",
                          i, irq->affinity_hint_mask.bits[0], irq->vector);

                irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
        }

        return rc;

err:
        for (k = ENA_IO_IRQ_FIRST_IDX; k < i; k++) {
                irq = &adapter->irq_tbl[k];
                free_irq(irq->vector, irq->data);
        }

        return rc;
}
static void ena_free_mgmnt_irq(struct ena_adapter *adapter)
{
        struct ena_irq *irq;

        irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
        synchronize_irq(irq->vector);
        irq_set_affinity_hint(irq->vector, NULL);
        free_irq(irq->vector, irq->data);
}
static void ena_free_io_irq(struct ena_adapter *adapter)
{
        struct ena_irq *irq;
        int i;

#ifdef CONFIG_RFS_ACCEL
        if (adapter->msix_vecs >= 1) {
                free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
                adapter->netdev->rx_cpu_rmap = NULL;
        }
#endif /* CONFIG_RFS_ACCEL */

        for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
                irq = &adapter->irq_tbl[i];
                irq_set_affinity_hint(irq->vector, NULL);
                free_irq(irq->vector, irq->data);
        }
}
static void ena_disable_msix(struct ena_adapter *adapter)
{
        if (test_and_clear_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags))
                pci_free_irq_vectors(adapter->pdev);
}
static void ena_disable_io_intr_sync(struct ena_adapter *adapter)
{
        int i;

        if (!netif_running(adapter->netdev))
                return;

        for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++)
                synchronize_irq(adapter->irq_tbl[i].vector);
}
static void ena_del_napi(struct ena_adapter *adapter)
{
        int i;

        for (i = 0; i < adapter->num_queues; i++)
                netif_napi_del(&adapter->ena_napi[i].napi);
}
static void ena_init_napi(struct ena_adapter *adapter)
{
        struct ena_napi *napi;
        int i;

        for (i = 0; i < adapter->num_queues; i++) {
                napi = &adapter->ena_napi[i];

                netif_napi_add(adapter->netdev,
                               &adapter->ena_napi[i].napi,
                               ena_io_poll,
                               ENA_NAPI_BUDGET);
                napi->rx_ring = &adapter->rx_ring[i];
                napi->tx_ring = &adapter->tx_ring[i];
                napi->qid = i;
        }
}
static void ena_napi_disable_all(struct ena_adapter *adapter)
{
        int i;

        for (i = 0; i < adapter->num_queues; i++)
                napi_disable(&adapter->ena_napi[i].napi);
}
static void ena_napi_enable_all(struct ena_adapter *adapter)
{
        int i;

        for (i = 0; i < adapter->num_queues; i++)
                napi_enable(&adapter->ena_napi[i].napi);
}
static void ena_restore_ethtool_params(struct ena_adapter *adapter)
{
        adapter->tx_usecs = 0;
        adapter->rx_usecs = 0;
        adapter->tx_frames = 1;
        adapter->rx_frames = 1;
}
/* Configure the Rx forwarding */
static int ena_rss_configure(struct ena_adapter *adapter)
{
        struct ena_com_dev *ena_dev = adapter->ena_dev;
        int rc;

        /* In case the RSS table wasn't initialized by probe */
        if (!ena_dev->rss.tbl_log_size) {
                rc = ena_rss_init_default(adapter);
                if (rc && (rc != -EOPNOTSUPP)) {
                        netif_err(adapter, ifup, adapter->netdev,
                                  "Failed to init RSS rc: %d\n", rc);
                        return rc;
                }
        }

        /* Set indirect table */
        rc = ena_com_indirect_table_set(ena_dev);
        if (unlikely(rc && rc != -EOPNOTSUPP))
                return rc;

        /* Configure hash function (if supported) */
        rc = ena_com_set_hash_function(ena_dev);
        if (unlikely(rc && (rc != -EOPNOTSUPP)))
                return rc;

        /* Configure hash inputs (if supported) */
        rc = ena_com_set_hash_ctrl(ena_dev);
        if (unlikely(rc && (rc != -EOPNOTSUPP)))
                return rc;

        return 0;
}
static int ena_up_complete(struct ena_adapter *adapter)
{
        int rc;

        rc = ena_rss_configure(adapter);
        if (rc)
                return rc;

        ena_init_napi(adapter);

        ena_change_mtu(adapter->netdev, adapter->netdev->mtu);

        ena_refill_all_rx_bufs(adapter);

        /* enable transmits */
        netif_tx_start_all_queues(adapter->netdev);

        ena_restore_ethtool_params(adapter);

        ena_napi_enable_all(adapter);

        return 0;
}
static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
{
        struct ena_com_create_io_ctx ctx = { 0 };
        struct ena_com_dev *ena_dev;
        struct ena_ring *tx_ring;
        u32 msix_vector;
        u16 ena_qid;
        int rc;

        ena_dev = adapter->ena_dev;

        tx_ring = &adapter->tx_ring[qid];
        msix_vector = ENA_IO_IRQ_IDX(qid);
        ena_qid = ENA_IO_TXQ_IDX(qid);

        ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
        ctx.qid = ena_qid;
        ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
        ctx.msix_vector = msix_vector;
        ctx.queue_size = adapter->tx_ring_size;
        ctx.numa_node = cpu_to_node(tx_ring->cpu);

        rc = ena_com_create_io_queue(ena_dev, &ctx);
        if (rc) {
                netif_err(adapter, ifup, adapter->netdev,
                          "Failed to create I/O TX queue num %d rc: %d\n",
                          qid, rc);
                return rc;
        }

        rc = ena_com_get_io_handlers(ena_dev, ena_qid,
                                     &tx_ring->ena_com_io_sq,
                                     &tx_ring->ena_com_io_cq);
        if (rc) {
                netif_err(adapter, ifup, adapter->netdev,
                          "Failed to get TX queue handlers. TX queue num %d rc: %d\n",
                          qid, rc);
                ena_com_destroy_io_queue(ena_dev, ena_qid);
                return rc;
        }

        ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node);
        return rc;
}
static int ena_create_all_io_tx_queues(struct ena_adapter *adapter)
{
        struct ena_com_dev *ena_dev = adapter->ena_dev;
        int rc, i;

        for (i = 0; i < adapter->num_queues; i++) {
                rc = ena_create_io_tx_queue(adapter, i);
                if (rc)
                        goto create_err;
        }

        return 0;

create_err:
        while (i--)
                ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i));

        return rc;
}
static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
{
        struct ena_com_dev *ena_dev;
        struct ena_com_create_io_ctx ctx = { 0 };
        struct ena_ring *rx_ring;
        u32 msix_vector;
        u16 ena_qid;
        int rc;

        ena_dev = adapter->ena_dev;

        rx_ring = &adapter->rx_ring[qid];
        msix_vector = ENA_IO_IRQ_IDX(qid);
        ena_qid = ENA_IO_RXQ_IDX(qid);

        ctx.qid = ena_qid;
        ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
        ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
        ctx.msix_vector = msix_vector;
        ctx.queue_size = adapter->rx_ring_size;
        ctx.numa_node = cpu_to_node(rx_ring->cpu);

        rc = ena_com_create_io_queue(ena_dev, &ctx);
        if (rc) {
                netif_err(adapter, ifup, adapter->netdev,
                          "Failed to create I/O RX queue num %d rc: %d\n",
                          qid, rc);
                return rc;
        }

        rc = ena_com_get_io_handlers(ena_dev, ena_qid,
                                     &rx_ring->ena_com_io_sq,
                                     &rx_ring->ena_com_io_cq);
        if (rc) {
                netif_err(adapter, ifup, adapter->netdev,
                          "Failed to get RX queue handlers. RX queue num %d rc: %d\n",
                          qid, rc);
                ena_com_destroy_io_queue(ena_dev, ena_qid);
                return rc;
        }

        ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node);

        return rc;
}
static int ena_create_all_io_rx_queues(struct ena_adapter *adapter)
{
        struct ena_com_dev *ena_dev = adapter->ena_dev;
        int rc, i;

        for (i = 0; i < adapter->num_queues; i++) {
                rc = ena_create_io_rx_queue(adapter, i);
                if (rc)
                        goto create_err;
        }

        return 0;

create_err:
        while (i--)
                ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));

        return rc;
}
static int ena_up(struct ena_adapter *adapter)
{
        int rc, i;

        netdev_dbg(adapter->netdev, "%s\n", __func__);

        ena_setup_io_intr(adapter);

        rc = ena_request_io_irq(adapter);
        if (rc)
                goto err_req_irq;

        /* allocate transmit descriptors */
        rc = ena_setup_all_tx_resources(adapter);
        if (rc)
                goto err_setup_tx;

        /* allocate receive descriptors */
        rc = ena_setup_all_rx_resources(adapter);
        if (rc)
                goto err_setup_rx;

        /* Create TX queues */
        rc = ena_create_all_io_tx_queues(adapter);
        if (rc)
                goto err_create_tx_queues;

        /* Create RX queues */
        rc = ena_create_all_io_rx_queues(adapter);
        if (rc)
                goto err_create_rx_queues;

        rc = ena_up_complete(adapter);
        if (rc)
                goto err_up;

        if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
                netif_carrier_on(adapter->netdev);

        u64_stats_update_begin(&adapter->syncp);
        adapter->dev_stats.interface_up++;
        u64_stats_update_end(&adapter->syncp);

        set_bit(ENA_FLAG_DEV_UP, &adapter->flags);

        /* Enable completion queues interrupt */
        for (i = 0; i < adapter->num_queues; i++)
                ena_unmask_interrupt(&adapter->tx_ring[i],
                                     &adapter->rx_ring[i]);

        /* schedule napi in case we had pending packets
         * from the last time we disable napi
         */
        for (i = 0; i < adapter->num_queues; i++)
                napi_schedule(&adapter->ena_napi[i].napi);

        return rc;

err_up:
        ena_destroy_all_rx_queues(adapter);
err_create_rx_queues:
        ena_destroy_all_tx_queues(adapter);
err_create_tx_queues:
        ena_free_all_io_rx_resources(adapter);
err_setup_rx:
        ena_free_all_io_tx_resources(adapter);
err_setup_tx:
        ena_free_io_irq(adapter);
err_req_irq:

        return rc;
}
static void ena_down(struct ena_adapter *adapter)
{
        netif_info(adapter, ifdown, adapter->netdev, "%s\n", __func__);

        clear_bit(ENA_FLAG_DEV_UP, &adapter->flags);

        u64_stats_update_begin(&adapter->syncp);
        adapter->dev_stats.interface_down++;
        u64_stats_update_end(&adapter->syncp);

        netif_carrier_off(adapter->netdev);
        netif_tx_disable(adapter->netdev);

        /* After this point the napi handler won't enable the tx queue */
        ena_napi_disable_all(adapter);

        /* After destroy the queue there won't be any new interrupts */

        if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) {
                int rc;

                rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
                if (rc)
                        dev_err(&adapter->pdev->dev, "Device reset failed\n");
        }

        ena_destroy_all_io_queues(adapter);

        ena_disable_io_intr_sync(adapter);
        ena_free_io_irq(adapter);
        ena_del_napi(adapter);

        ena_free_all_tx_bufs(adapter);
        ena_free_all_rx_bufs(adapter);
        ena_free_all_io_tx_resources(adapter);
        ena_free_all_io_rx_resources(adapter);
}
/* ena_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 */
static int ena_open(struct net_device *netdev)
{
        struct ena_adapter *adapter = netdev_priv(netdev);
        int rc;

        /* Notify the stack of the actual queue counts. */
        rc = netif_set_real_num_tx_queues(netdev, adapter->num_queues);
        if (rc) {
                netif_err(adapter, ifup, netdev, "Can't set num tx queues\n");
                return rc;
        }

        rc = netif_set_real_num_rx_queues(netdev, adapter->num_queues);
        if (rc) {
                netif_err(adapter, ifup, netdev, "Can't set num rx queues\n");
                return rc;
        }

        rc = ena_up(adapter);
        if (rc)
                return rc;

        return rc;
}
/* ena_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the drivers control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 */
static int ena_close(struct net_device *netdev)
{
        struct ena_adapter *adapter = netdev_priv(netdev);

        netif_dbg(adapter, ifdown, netdev, "%s\n", __func__);

        if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
                ena_down(adapter);

        /* Check for device status and issue reset if needed*/
        check_for_admin_com_state(adapter);
        if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
                netif_err(adapter, ifdown, adapter->netdev,
                          "Destroy failure, restarting device\n");
                ena_dump_stats_to_dmesg(adapter);
                /* rtnl lock already obtained in dev_ioctl() layer */
                ena_destroy_device(adapter);
                ena_restore_device(adapter);
        }

        return 0;
}
static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct sk_buff *skb)
{
        u32 mss = skb_shinfo(skb)->gso_size;
        struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
        u8 l4_protocol = 0;

        if ((skb->ip_summed == CHECKSUM_PARTIAL) || mss) {
                ena_tx_ctx->l4_csum_enable = 1;
                if (mss) {
                        ena_tx_ctx->tso_enable = 1;
                        ena_meta->l4_hdr_len = tcp_hdr(skb)->doff;
                        ena_tx_ctx->l4_csum_partial = 0;
                } else {
                        ena_tx_ctx->tso_enable = 0;
                        ena_meta->l4_hdr_len = 0;
                        ena_tx_ctx->l4_csum_partial = 1;
                }

                switch (ip_hdr(skb)->version) {
                case IPVERSION:
                        ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;
                        if (ip_hdr(skb)->frag_off & htons(IP_DF))
                                ena_tx_ctx->df = 1;
                        if (mss)
                                ena_tx_ctx->l3_csum_enable = 1;
                        l4_protocol = ip_hdr(skb)->protocol;
                        break;
                case 6:
                        ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
                        l4_protocol = ipv6_hdr(skb)->nexthdr;
                        break;
                default:
                        break;
                }

                if (l4_protocol == IPPROTO_TCP)
                        ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
                else
                        ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;

                ena_meta->mss = mss;
                ena_meta->l3_hdr_len = skb_network_header_len(skb);
                ena_meta->l3_hdr_offset = skb_network_offset(skb);
                ena_tx_ctx->meta_valid = 1;

        } else {
                ena_tx_ctx->meta_valid = 0;
        }
}
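
/* ena_tx_csum() translates the stack's CHECKSUM_PARTIAL/GSO state into
 * device Tx metadata: TSO implies full L4 checksum offload with an
 * explicit L4 header length, while plain checksum offload uses the
 * partial-checksum mode. The metadata is only marked valid when there
 * is actually something to offload.
 */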
static int ena_check_and_linearize_skb(struct ena_ring *tx_ring,
                                       struct sk_buff *skb)
{
        int num_frags, header_len, rc;

        num_frags = skb_shinfo(skb)->nr_frags;
        header_len = skb_headlen(skb);

        if (num_frags < tx_ring->sgl_size)
                return 0;

        if ((num_frags == tx_ring->sgl_size) &&
            (header_len < tx_ring->tx_max_header_size))
                return 0;

        u64_stats_update_begin(&tx_ring->syncp);
        tx_ring->tx_stats.linearize++;
        u64_stats_update_end(&tx_ring->syncp);

        rc = skb_linearize(skb);
        if (unlikely(rc)) {
                u64_stats_update_begin(&tx_ring->syncp);
                tx_ring->tx_stats.linearize_failed++;
                u64_stats_update_end(&tx_ring->syncp);
        }

        return rc;
}
/* Called with netif_tx_lock. */
static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct ena_adapter *adapter = netdev_priv(dev);
        struct ena_tx_buffer *tx_info;
        struct ena_com_tx_ctx ena_tx_ctx;
        struct ena_ring *tx_ring;
        struct netdev_queue *txq;
        struct ena_com_buf *ena_buf;
        void *push_hdr;
        u32 len, last_frag;
        u16 next_to_use;
        u16 req_id;
        u16 push_len;
        u16 header_len;
        dma_addr_t dma;
        int qid, rc, nb_hw_desc;
        int i = -1;

        netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb);
        /* Determine which tx ring we will be placed on */
        qid = skb_get_queue_mapping(skb);
        tx_ring = &adapter->tx_ring[qid];
        txq = netdev_get_tx_queue(dev, qid);

        rc = ena_check_and_linearize_skb(tx_ring, skb);
        if (unlikely(rc))
                goto error_drop_packet;

        skb_tx_timestamp(skb);
        len = skb_headlen(skb);

        next_to_use = tx_ring->next_to_use;
        req_id = tx_ring->free_tx_ids[next_to_use];
        tx_info = &tx_ring->tx_buffer_info[req_id];
        tx_info->num_of_bufs = 0;

        WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id);
        ena_buf = tx_info->bufs;
        tx_info->skb = skb;

        if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
                /* prepared the push buffer */
                push_len = min_t(u32, len, tx_ring->tx_max_header_size);
                header_len = push_len;
                push_hdr = skb->data;
        } else {
                push_len = 0;
                header_len = min_t(u32, len, tx_ring->tx_max_header_size);
                push_hdr = NULL;
        }

        netif_dbg(adapter, tx_queued, dev,
                  "skb: %p header_buf->vaddr: %p push_len: %d\n", skb,
                  push_hdr, push_len);

        if (len > push_len) {
                dma = dma_map_single(tx_ring->dev, skb->data + push_len,
                                     len - push_len, DMA_TO_DEVICE);
                if (dma_mapping_error(tx_ring->dev, dma))
                        goto error_report_dma_error;

                ena_buf->paddr = dma;
                ena_buf->len = len - push_len;

                ena_buf++;
                tx_info->num_of_bufs++;
        }

        last_frag = skb_shinfo(skb)->nr_frags;

        for (i = 0; i < last_frag; i++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                len = skb_frag_size(frag);
                dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len,
                                       DMA_TO_DEVICE);
                if (dma_mapping_error(tx_ring->dev, dma))
                        goto error_report_dma_error;

                ena_buf->paddr = dma;
                ena_buf->len = len;
                ena_buf++;
        }

        tx_info->num_of_bufs += last_frag;

        memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
        ena_tx_ctx.ena_bufs = tx_info->bufs;
        ena_tx_ctx.push_header = push_hdr;
        ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
        ena_tx_ctx.req_id = req_id;
        ena_tx_ctx.header_len = header_len;

        /* set flags and meta data */
        ena_tx_csum(&ena_tx_ctx, skb);

        /* prepare the packet's descriptors to dma engine */
        rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx,
                                &nb_hw_desc);
        if (rc) {
                netif_err(adapter, tx_queued, dev,
                          "failed to prepare tx bufs\n");
                u64_stats_update_begin(&tx_ring->syncp);
                tx_ring->tx_stats.queue_stop++;
                tx_ring->tx_stats.prepare_ctx_err++;
                u64_stats_update_end(&tx_ring->syncp);
                netif_tx_stop_queue(txq);
                goto error_unmap_dma;
        }

        netdev_tx_sent_queue(txq, skb->len);

        u64_stats_update_begin(&tx_ring->syncp);
        tx_ring->tx_stats.cnt++;
        tx_ring->tx_stats.bytes += skb->len;
        u64_stats_update_end(&tx_ring->syncp);

        tx_info->tx_descs = nb_hw_desc;
        tx_info->last_jiffies = jiffies;
        tx_info->print_once = 0;

        tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
                                                    tx_ring->ring_size);

        /* This WMB is aimed to:
         * 1 - perform smp barrier before reading next_to_completion
         * 2 - make sure the desc were written before trigger DB
         */
        wmb();

        /* stop the queue when no more space available, the packet can have up
         * to sgl_size + 2. one for the meta descriptor and one for header
         * (if the header is larger than tx_max_header_size).
         */
        if (unlikely(ena_com_sq_empty_space(tx_ring->ena_com_io_sq) <
                     (tx_ring->sgl_size + 2))) {
                netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n",
                          __func__, qid);

                netif_tx_stop_queue(txq);
                u64_stats_update_begin(&tx_ring->syncp);
                tx_ring->tx_stats.queue_stop++;
                u64_stats_update_end(&tx_ring->syncp);

                /* There is a rare condition where this function decide to
                 * stop the queue but meanwhile clean_tx_irq updates
                 * next_to_completion and terminates.
                 * The queue will remain stopped forever.
                 * To solve this issue this function perform rmb, check
                 * the wakeup condition and wake up the queue if needed.
                 */
                smp_rmb();

                if (ena_com_sq_empty_space(tx_ring->ena_com_io_sq)
                    > ENA_TX_WAKEUP_THRESH) {
                        netif_tx_wake_queue(txq);
                        u64_stats_update_begin(&tx_ring->syncp);
                        tx_ring->tx_stats.queue_wakeup++;
                        u64_stats_update_end(&tx_ring->syncp);
                }
        }

        if (netif_xmit_stopped(txq) || !skb->xmit_more) {
                /* trigger the dma engine */
                ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq, false);
                u64_stats_update_begin(&tx_ring->syncp);
                tx_ring->tx_stats.doorbells++;
                u64_stats_update_end(&tx_ring->syncp);
        }

        return NETDEV_TX_OK;

error_report_dma_error:
        u64_stats_update_begin(&tx_ring->syncp);
        tx_ring->tx_stats.dma_mapping_err++;
        u64_stats_update_end(&tx_ring->syncp);
        netdev_warn(adapter->netdev, "failed to map skb\n");

        tx_info->skb = NULL;

error_unmap_dma:
        if (i >= 0) {
                /* save value of frag that failed */
                last_frag = i;

                /* start back at beginning and unmap skb */
                tx_info->skb = NULL;
                ena_buf = tx_info->bufs;
                dma_unmap_single(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
                                 dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);

                /* unmap remaining mapped pages */
                for (i = 0; i < last_frag; i++) {
                        ena_buf++;
                        dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
                                       dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
                }
        }

error_drop_packet:

        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}
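
/* With ENA_ADMIN_PLACEMENT_POLICY_DEV (LLQ), up to tx_max_header_size
 * bytes of the header are pushed inline to device memory together with
 * the descriptors and only the remainder of the skb head is DMA
 * mapped; otherwise the whole head is mapped and the device fetches
 * the header by DMA like any other buffer.
 */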
#ifdef CONFIG_NET_POLL_CONTROLLER
static void ena_netpoll(struct net_device *netdev)
{
        struct ena_adapter *adapter = netdev_priv(netdev);
        int i;

        /* Dont schedule NAPI if the driver is in the middle of reset
         * or netdev is down.
         */

        if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags) ||
            test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
                return;

        for (i = 0; i < adapter->num_queues; i++)
                napi_schedule(&adapter->ena_napi[i].napi);
}
#endif /* CONFIG_NET_POLL_CONTROLLER */
static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
                            struct net_device *sb_dev,
                            select_queue_fallback_t fallback)
{
        u16 qid;
        /* we suspect that this is good for in-kernel network services that
         * want to loop incoming skb rx to tx in normal user generated traffic,
         * most probably we will not get to this
         */
        if (skb_rx_queue_recorded(skb))
                qid = skb_get_rx_queue(skb);
        else
                qid = fallback(dev, skb, NULL);

        return qid;
}
static void ena_config_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_admin_host_info *host_info;
	int rc;

	/* Allocate only the host info */
	rc = ena_com_allocate_host_info(ena_dev);
	if (rc) {
		pr_err("Cannot allocate host info\n");
		return;
	}

	host_info = ena_dev->host_attr.host_info;

	host_info->os_type = ENA_ADMIN_OS_LINUX;
	host_info->kernel_ver = LINUX_VERSION_CODE;
	strncpy(host_info->kernel_ver_str, utsname()->version,
		sizeof(host_info->kernel_ver_str) - 1);
	host_info->os_dist = 0;
	strncpy(host_info->os_dist_str, utsname()->release,
		sizeof(host_info->os_dist_str) - 1);
	host_info->driver_version =
		(DRV_MODULE_VER_MAJOR) |
		(DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
		(DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);

	rc = ena_com_set_host_attributes(ena_dev);
	if (rc) {
		if (rc == -EOPNOTSUPP)
			pr_warn("Cannot set host attributes\n");
		else
			pr_err("Cannot set host attributes\n");

		goto err;
	}

	return;

err:
	ena_com_delete_host_info(ena_dev);
}

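/* driver_version packs major/minor/sub-minor into one u32. For example,
 * assuming a minor shift of 8 and a sub-minor shift of 16 (as the shift
 * macro names suggest), version 1.5.2 would be encoded as
 * 0x00020501 = 1 | (5 << 8) | (2 << 16).
 */
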
static void ena_config_debug_area(struct ena_adapter *adapter)
{
	u32 debug_area_size;
	int rc, ss_count;

	ss_count = ena_get_sset_count(adapter->netdev, ETH_SS_STATS);
	if (ss_count <= 0) {
		netif_err(adapter, drv, adapter->netdev,
			  "SS count is negative\n");
		return;
	}

	/* allocate 32 bytes for each string and 64bit for the value */
	debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;

	rc = ena_com_allocate_debug_area(adapter->ena_dev, debug_area_size);
	if (rc) {
		pr_err("Cannot allocate debug area\n");
		return;
	}

	rc = ena_com_set_host_attributes(adapter->ena_dev);
	if (rc) {
		if (rc == -EOPNOTSUPP)
			netif_warn(adapter, drv, adapter->netdev,
				   "Cannot set host attributes\n");
		else
			netif_err(adapter, drv, adapter->netdev,
				  "Cannot set host attributes\n");
		goto err;
	}

	return;
err:
	ena_com_delete_debug_area(adapter->ena_dev);
}

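/* Sizing example for the debug area: with ETH_GSTRING_LEN == 32, a device
 * exposing 40 stats gets 40 * 32 bytes of name space plus 40 * 8 bytes of
 * u64 values, i.e. a 1600-byte debug area.
 */
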
static void ena_get_stats64(struct net_device *netdev,
			    struct rtnl_link_stats64 *stats)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	struct ena_ring *rx_ring, *tx_ring;
	unsigned int start;
	u64 rx_drops;
	int i;

	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
		return;

	for (i = 0; i < adapter->num_queues; i++) {
		u64 bytes, packets;

		tx_ring = &adapter->tx_ring[i];

		do {
			start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
			packets = tx_ring->tx_stats.cnt;
			bytes = tx_ring->tx_stats.bytes;
		} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));

		stats->tx_packets += packets;
		stats->tx_bytes += bytes;

		rx_ring = &adapter->rx_ring[i];

		do {
			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
			packets = rx_ring->rx_stats.cnt;
			bytes = rx_ring->rx_stats.bytes;
		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes += bytes;
	}

	do {
		start = u64_stats_fetch_begin_irq(&adapter->syncp);
		rx_drops = adapter->dev_stats.rx_drops;
	} while (u64_stats_fetch_retry_irq(&adapter->syncp, start));

	stats->rx_dropped = rx_drops;

	stats->multicast = 0;
	stats->collisions = 0;

	stats->rx_length_errors = 0;
	stats->rx_crc_errors = 0;
	stats->rx_frame_errors = 0;
	stats->rx_fifo_errors = 0;
	stats->rx_missed_errors = 0;
	stats->tx_window_errors = 0;

	stats->rx_errors = 0;
	stats->tx_errors = 0;
}

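/* The fetch_begin/fetch_retry loops above implement the u64_stats seqcount
 * protocol: on 32-bit kernels a concurrent writer can tear a 64-bit counter,
 * so the reader retries until it observes an unchanged sequence. On 64-bit
 * kernels these helpers are assumed to compile down to plain loads.
 */
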
static const struct net_device_ops ena_netdev_ops = {
	.ndo_open		= ena_open,
	.ndo_stop		= ena_close,
	.ndo_start_xmit		= ena_start_xmit,
	.ndo_select_queue	= ena_select_queue,
	.ndo_get_stats64	= ena_get_stats64,
	.ndo_tx_timeout		= ena_tx_timeout,
	.ndo_change_mtu		= ena_change_mtu,
	.ndo_set_mac_address	= NULL,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ena_netpoll,
#endif /* CONFIG_NET_POLL_CONTROLLER */
};

static int ena_device_validate_params(struct ena_adapter *adapter,
				      struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	rc = ether_addr_equal(get_feat_ctx->dev_attr.mac_addr,
			      adapter->mac_addr);
	if (!rc) {
		netif_err(adapter, drv, netdev,
			  "Error, mac addresses are different\n");
		return -EINVAL;
	}

	if ((get_feat_ctx->max_queues.max_cq_num < adapter->num_queues) ||
	    (get_feat_ctx->max_queues.max_sq_num < adapter->num_queues)) {
		netif_err(adapter, drv, netdev,
			  "Error, device doesn't support enough queues\n");
		return -EINVAL;
	}

	if (get_feat_ctx->dev_attr.max_mtu < netdev->mtu) {
		netif_err(adapter, drv, netdev,
			  "Error, device max mtu is smaller than netdev MTU\n");
		return -EINVAL;
	}

	return 0;
}

static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
			   struct ena_com_dev_get_features_ctx *get_feat_ctx,
			   bool *wd_state)
{
	struct device *dev = &pdev->dev;
	bool readless_supported;
	u32 aenq_groups;
	int dma_width;
	int rc;

	rc = ena_com_mmio_reg_read_request_init(ena_dev);
	if (rc) {
		dev_err(dev, "failed to init mmio read less\n");
		return rc;
	}

	/* The PCIe configuration space revision id indicates whether mmio reg
	 * read is disabled
	 */
	readless_supported = !(pdev->revision & ENA_MMIO_DISABLE_REG_READ);
	ena_com_set_mmio_read_mode(ena_dev, readless_supported);

	rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
	if (rc) {
		dev_err(dev, "Can not reset device\n");
		goto err_mmio_read_less;
	}

	rc = ena_com_validate_version(ena_dev);
	if (rc) {
		dev_err(dev, "device version is too low\n");
		goto err_mmio_read_less;
	}

	dma_width = ena_com_get_dma_width(ena_dev);
	if (dma_width < 0) {
		dev_err(dev, "Invalid dma width value %d", dma_width);
		rc = dma_width;
		goto err_mmio_read_less;
	}

	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_width));
	if (rc) {
		dev_err(dev, "pci_set_dma_mask failed 0x%x\n", rc);
		goto err_mmio_read_less;
	}

	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_width));
	if (rc) {
		dev_err(dev, "err_pci_set_consistent_dma_mask failed 0x%x\n",
			rc);
		goto err_mmio_read_less;
	}

	/* ENA admin level init */
	rc = ena_com_admin_init(ena_dev, &aenq_handlers, true);
	if (rc) {
		dev_err(dev,
			"Can not initialize ena admin queue with device\n");
		goto err_mmio_read_less;
	}

	/* To enable the msix interrupts the driver needs to know the number
	 * of queues. So the driver uses polling mode to retrieve this
	 * information
	 */
	ena_com_set_admin_polling_mode(ena_dev, true);

	ena_config_host_info(ena_dev);

	/* Get Device Attributes */
	rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
	if (rc) {
		dev_err(dev, "Cannot get attribute for ena device rc=%d\n", rc);
		goto err_admin_init;
	}

	/* Try to turn all the available aenq groups */
	aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
		      BIT(ENA_ADMIN_FATAL_ERROR) |
		      BIT(ENA_ADMIN_WARNING) |
		      BIT(ENA_ADMIN_NOTIFICATION) |
		      BIT(ENA_ADMIN_KEEP_ALIVE);

	aenq_groups &= get_feat_ctx->aenq.supported_groups;

	rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
	if (rc) {
		dev_err(dev, "Cannot configure aenq groups rc= %d\n", rc);
		goto err_admin_init;
	}

	*wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));

	return 0;

err_admin_init:
	ena_com_delete_host_info(ena_dev);
	ena_com_admin_destroy(ena_dev);
err_mmio_read_less:
	ena_com_mmio_reg_read_request_destroy(ena_dev);

	return rc;
}

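/* ena_device_init leaves the admin queue in polling mode on purpose: MSI-X
 * cannot be configured until the queue counts are known, and those counts
 * are themselves fetched over the admin queue. Interrupt mode is enabled
 * later, in ena_enable_msix_and_set_admin_interrupts(), once vectors exist.
 */
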
static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter,
						    int io_vectors)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	struct device *dev = &adapter->pdev->dev;
	int rc;

	rc = ena_enable_msix(adapter, io_vectors);
	if (rc) {
		dev_err(dev, "Can not reserve msix vectors\n");
		return rc;
	}

	ena_setup_mgmnt_intr(adapter);

	rc = ena_request_mgmnt_irq(adapter);
	if (rc) {
		dev_err(dev, "Can not setup management interrupts\n");
		goto err_disable_msix;
	}

	ena_com_set_admin_polling_mode(ena_dev, false);

	ena_com_admin_aenq_enable(ena_dev);

	return 0;

err_disable_msix:
	ena_disable_msix(adapter);

	return rc;
}

static void ena_destroy_device(struct ena_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	bool dev_up;

	netif_carrier_off(netdev);

	del_timer_sync(&adapter->timer_service);

	dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
	adapter->dev_up_before_reset = dev_up;

	ena_com_set_admin_running_state(ena_dev, false);

	if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
		ena_down(adapter);

	/* Before releasing the ENA resources, a device reset is required
	 * (to prevent the device from accessing them).
	 * In case the reset flag is set and the device is up, ena_down()
	 * already performs the reset, so it can be skipped.
	 */
	if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))
		ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);

	ena_free_mgmnt_irq(adapter);

	ena_disable_msix(adapter);

	ena_com_abort_admin_commands(ena_dev);

	ena_com_wait_for_abort_completion(ena_dev);

	ena_com_admin_destroy(ena_dev);

	ena_com_mmio_reg_read_request_destroy(ena_dev);

	adapter->reset_reason = ENA_REGS_RESET_NORMAL;

	clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
}

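/* ena_destroy_device() and ena_restore_device() form the recovery pair used
 * by the reset worker and by the PM suspend/resume callbacks: destroy stops
 * the watchdog timer, resets the device so it stops DMA-ing into host
 * memory, and tears down IRQs and admin resources; restore re-runs the same
 * bring-up sequence as probe, minus netdev allocation and registration.
 */
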
static int ena_restore_device(struct ena_adapter *adapter)
{
	struct ena_com_dev_get_features_ctx get_feat_ctx;
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	struct pci_dev *pdev = adapter->pdev;
	bool wd_state;
	int rc;

	set_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
	rc = ena_device_init(ena_dev, adapter->pdev, &get_feat_ctx, &wd_state);
	if (rc) {
		dev_err(&pdev->dev, "Can not initialize device\n");
		goto err;
	}
	adapter->wd_state = wd_state;

	rc = ena_device_validate_params(adapter, &get_feat_ctx);
	if (rc) {
		dev_err(&pdev->dev, "Validation of device parameters failed\n");
		goto err_device_destroy;
	}

	clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
	/* Make sure we don't have a race with AENQ Links state handler */
	if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
		netif_carrier_on(adapter->netdev);

	rc = ena_enable_msix_and_set_admin_interrupts(adapter,
						      adapter->num_queues);
	if (rc) {
		dev_err(&pdev->dev, "Enable MSI-X failed\n");
		goto err_device_destroy;
	}
	/* If the interface was up before the reset bring it up */
	if (adapter->dev_up_before_reset) {
		rc = ena_up(adapter);
		if (rc) {
			dev_err(&pdev->dev, "Failed to create I/O queues\n");
			goto err_disable_msix;
		}
	}

	set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
	mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
	dev_err(&pdev->dev, "Device reset completed successfully\n");

	return rc;
err_disable_msix:
	ena_free_mgmnt_irq(adapter);
	ena_disable_msix(adapter);
err_device_destroy:
	ena_com_admin_destroy(ena_dev);
err:
	clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
	clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
	dev_err(&pdev->dev,
		"Reset attempt failed. Can not reset the device\n");

	return rc;
}

static void ena_fw_reset_device(struct work_struct *work)
{
	struct ena_adapter *adapter =
		container_of(work, struct ena_adapter, reset_task);
	struct pci_dev *pdev = adapter->pdev;

	if (unlikely(!test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
		dev_err(&pdev->dev,
			"device reset scheduled while reset bit is off\n");
		return;
	}
	rtnl_lock();
	ena_destroy_device(adapter);
	ena_restore_device(adapter);
	rtnl_unlock();
}

static int check_for_rx_interrupt_queue(struct ena_adapter *adapter,
					struct ena_ring *rx_ring)
{
	if (likely(rx_ring->first_interrupt))
		return 0;

	if (ena_com_cq_empty(rx_ring->ena_com_io_cq))
		return 0;

	rx_ring->no_interrupt_event_cnt++;

	if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) {
		netif_err(adapter, rx_err, adapter->netdev,
			  "Potential MSIX issue on Rx side Queue = %d. Reset the device\n",
			  rx_ring->qid);
		adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
		smp_mb__before_atomic();
		set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
		return -EIO;
	}

	return 0;
}

static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
					  struct ena_ring *tx_ring)
{
	struct ena_tx_buffer *tx_buf;
	unsigned long last_jiffies;
	u32 missed_tx = 0;
	int i;

	for (i = 0; i < tx_ring->ring_size; i++) {
		tx_buf = &tx_ring->tx_buffer_info[i];
		last_jiffies = tx_buf->last_jiffies;

		if (last_jiffies == 0)
			/* no pending Tx at this location */
			continue;

		if (unlikely(!tx_ring->first_interrupt && time_is_before_jiffies(last_jiffies +
			     2 * adapter->missing_tx_completion_to))) {
			/* If after the graceful period the interrupt is still
			 * not received, we schedule a reset
			 */
			netif_err(adapter, tx_err, adapter->netdev,
				  "Potential MSIX issue on Tx side Queue = %d. Reset the device\n",
				  tx_ring->qid);
			adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
			smp_mb__before_atomic();
			set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
			return -EIO;
		}

		if (unlikely(time_is_before_jiffies(last_jiffies +
			     adapter->missing_tx_completion_to))) {
			if (!tx_buf->print_once)
				netif_notice(adapter, tx_err, adapter->netdev,
					     "Found a Tx that wasn't completed on time, qid %d, index %d.\n",
					     tx_ring->qid, i);

			tx_buf->print_once = 1;
			missed_tx++;
		}
	}

	if (unlikely(missed_tx > adapter->missing_tx_completion_threshold)) {
		netif_err(adapter, tx_err, adapter->netdev,
			  "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n",
			  missed_tx,
			  adapter->missing_tx_completion_threshold);
		adapter->reset_reason =
			ENA_REGS_RESET_MISS_TX_CMPL;
		set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
		return -EIO;
	}

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.missed_tx = missed_tx;
	u64_stats_update_end(&tx_ring->syncp);

	return 0;
}

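/* Two distinct deadlines are checked per Tx buffer above: if no interrupt
 * was ever seen on the ring after twice the completion timeout, an MSI-X
 * routing problem is assumed and a reset is scheduled immediately; a single
 * overdue completion is merely counted, and only when the count exceeds
 * missing_tx_completion_threshold does the driver request a reset.
 */
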
static void check_for_missing_completions(struct ena_adapter *adapter)
{
	struct ena_ring *tx_ring;
	struct ena_ring *rx_ring;
	int i, budget, rc;

	/* Make sure the driver doesn't turn the device on or off in another
	 * process
	 */
	smp_rmb();

	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
		return;

	if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
		return;

	if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT)
		return;

	budget = ENA_MONITORED_TX_QUEUES;

	for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) {
		tx_ring = &adapter->tx_ring[i];
		rx_ring = &adapter->rx_ring[i];

		rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
		if (unlikely(rc))
			return;

		rc = check_for_rx_interrupt_queue(adapter, rx_ring);
		if (unlikely(rc))
			return;

		budget--;
		if (!budget)
			break;
	}

	adapter->last_monitored_tx_qid = i % adapter->num_queues;
}

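/* Only a budget of ENA_MONITORED_TX_QUEUES rings is scanned per watchdog
 * tick; the scan resumes from last_monitored_tx_qid on the next tick, so a
 * device with more rings than the budget is still fully covered over a few
 * consecutive ticks at the timer's 1 Hz cadence.
 */
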
/* trigger napi schedule after 2 consecutive detections */
#define EMPTY_RX_REFILL 2

/* For the rare case where the device runs out of Rx descriptors and the
 * napi handler failed to refill new Rx descriptors (due to a lack of memory,
 * for example).
 * This case will lead to a deadlock:
 * the device won't send interrupts since all the new Rx packets will be
 * dropped, and the napi handler won't allocate new Rx descriptors, so the
 * device won't be able to deliver new packets.
 *
 * This scenario can happen when the kernel's vm.min_free_kbytes is too small.
 * It is recommended to have at least 512MB, with a minimum of 128MB for a
 * constrained environment.
 *
 * When such a situation is detected - reschedule napi.
 */
static void check_for_empty_rx_ring(struct ena_adapter *adapter)
{
	struct ena_ring *rx_ring;
	int i, refill_required;

	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
		return;

	if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
		return;

	for (i = 0; i < adapter->num_queues; i++) {
		rx_ring = &adapter->rx_ring[i];

		refill_required =
			ena_com_sq_empty_space(rx_ring->ena_com_io_sq);
		if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
			rx_ring->empty_rx_queue++;

			if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
				u64_stats_update_begin(&rx_ring->syncp);
				rx_ring->rx_stats.empty_rx_ring++;
				u64_stats_update_end(&rx_ring->syncp);

				netif_err(adapter, drv, adapter->netdev,
					  "trigger refill for ring %d\n", i);

				napi_schedule(rx_ring->napi);
				rx_ring->empty_rx_queue = 0;
			}
		} else {
			rx_ring->empty_rx_queue = 0;
		}
	}
}

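/* The min_free_kbytes recommendation above can be applied with sysctl, e.g.
 * "sysctl -w vm.min_free_kbytes=131072" for the 128MB floor mentioned in the
 * comment (524288 for the recommended 512MB).
 */
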
/* Check for keep alive expiration */
static void check_for_missing_keep_alive(struct ena_adapter *adapter)
{
	unsigned long keep_alive_expired;

	if (!adapter->wd_state)
		return;

	if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
		return;

	keep_alive_expired = round_jiffies(adapter->last_keep_alive_jiffies +
					   adapter->keep_alive_timeout);
	if (unlikely(time_is_before_jiffies(keep_alive_expired))) {
		netif_err(adapter, drv, adapter->netdev,
			  "Keep alive watchdog timeout.\n");
		u64_stats_update_begin(&adapter->syncp);
		adapter->dev_stats.wd_expired++;
		u64_stats_update_end(&adapter->syncp);
		adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
		set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
	}
}

static void check_for_admin_com_state(struct ena_adapter *adapter)
{
	if (unlikely(!ena_com_get_admin_running_state(adapter->ena_dev))) {
		netif_err(adapter, drv, adapter->netdev,
			  "ENA admin queue is not in running state!\n");
		u64_stats_update_begin(&adapter->syncp);
		adapter->dev_stats.admin_q_pause++;
		u64_stats_update_end(&adapter->syncp);
		adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO;
		set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
	}
}

static void ena_update_hints(struct ena_adapter *adapter,
			     struct ena_admin_ena_hw_hints *hints)
{
	struct net_device *netdev = adapter->netdev;

	if (hints->admin_completion_tx_timeout)
		adapter->ena_dev->admin_queue.completion_timeout =
			hints->admin_completion_tx_timeout * 1000;

	if (hints->mmio_read_timeout)
		/* convert to usec */
		adapter->ena_dev->mmio_read.reg_read_to =
			hints->mmio_read_timeout * 1000;

	if (hints->missed_tx_completion_count_threshold_to_reset)
		adapter->missing_tx_completion_threshold =
			hints->missed_tx_completion_count_threshold_to_reset;

	if (hints->missing_tx_completion_timeout) {
		if (hints->missing_tx_completion_timeout == ENA_HW_HINTS_NO_TIMEOUT)
			adapter->missing_tx_completion_to = ENA_HW_HINTS_NO_TIMEOUT;
		else
			adapter->missing_tx_completion_to =
				msecs_to_jiffies(hints->missing_tx_completion_timeout);
	}

	if (hints->netdev_wd_timeout)
		netdev->watchdog_timeo = msecs_to_jiffies(hints->netdev_wd_timeout);

	if (hints->driver_watchdog_timeout) {
		if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
			adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
		else
			adapter->keep_alive_timeout =
				msecs_to_jiffies(hints->driver_watchdog_timeout);
	}
}

static void ena_update_host_info(struct ena_admin_host_info *host_info,
				 struct net_device *netdev)
{
	host_info->supported_network_features[0] =
		netdev->features & GENMASK_ULL(31, 0);
	host_info->supported_network_features[1] =
		(netdev->features & GENMASK_ULL(63, 32)) >> 32;
}

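/* Example of the split above: for features == 0x0000000100004833,
 * supported_network_features[0] receives 0x00004833 (bits 31:0) and
 * supported_network_features[1] receives 0x00000001 (bits 63:32 shifted
 * down), so the device sees the full 64-bit mask as two u32 fields.
 */
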
static void ena_timer_service(struct timer_list *t)
{
	struct ena_adapter *adapter = from_timer(adapter, t, timer_service);
	u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr;
	struct ena_admin_host_info *host_info =
		adapter->ena_dev->host_attr.host_info;

	check_for_missing_keep_alive(adapter);

	check_for_admin_com_state(adapter);

	check_for_missing_completions(adapter);

	check_for_empty_rx_ring(adapter);

	if (debug_area)
		ena_dump_stats_to_buf(adapter, debug_area);

	if (host_info)
		ena_update_host_info(host_info, adapter->netdev);

	if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
		netif_err(adapter, drv, adapter->netdev,
			  "Trigger reset is on\n");
		ena_dump_stats_to_dmesg(adapter);
		queue_work(ena_wq, &adapter->reset_task);
		return;
	}

	/* Reset the timer */
	mod_timer(&adapter->timer_service, jiffies + HZ);
}

static int ena_calc_io_queue_num(struct pci_dev *pdev,
				 struct ena_com_dev *ena_dev,
				 struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	int io_sq_num, io_queue_num;

	/* In case of LLQ use the llq number in the get feature cmd */
	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		io_sq_num = get_feat_ctx->max_queues.max_llq_num;

		if (io_sq_num == 0) {
			dev_warn(&pdev->dev,
				 "Trying to use LLQ but llq_num is 0. Fall back into regular queues\n");

			ena_dev->tx_mem_queue_type =
				ENA_ADMIN_PLACEMENT_POLICY_HOST;
			io_sq_num = get_feat_ctx->max_queues.max_sq_num;
		}
	} else {
		io_sq_num = get_feat_ctx->max_queues.max_sq_num;
	}

	io_queue_num = min_t(int, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES);
	io_queue_num = min_t(int, io_queue_num, io_sq_num);
	io_queue_num = min_t(int, io_queue_num,
			     get_feat_ctx->max_queues.max_cq_num);
	/* 1 IRQ for mgmnt and 1 IRQ for each IO direction */
	io_queue_num = min_t(int, io_queue_num, pci_msix_vec_count(pdev) - 1);
	if (unlikely(!io_queue_num)) {
		dev_err(&pdev->dev, "The device doesn't have io queues\n");
		return -EFAULT;
	}

	return io_queue_num;
}

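/* Example: on an 8-vCPU instance whose device reports 128 SQs/CQs and 9
 * MSI-X vectors, the result is min(8, 128, 128, 9 - 1) = 8 IO queues,
 * subject to the ENA_MAX_NUM_IO_QUEUES cap; the subtracted vector is the
 * one reserved for the management/admin interrupt.
 */
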
static void ena_set_push_mode(struct pci_dev *pdev, struct ena_com_dev *ena_dev,
			      struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	bool has_mem_bar;

	has_mem_bar = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(ENA_MEM_BAR);

	/* Enable push mode if device supports LLQ */
	if (has_mem_bar && (get_feat_ctx->max_queues.max_llq_num > 0))
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
	else
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
}

static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat,
				 struct net_device *netdev)
{
	netdev_features_t dev_features = 0;

	/* Set offload features */
	if (feat->offload.tx &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
		dev_features |= NETIF_F_IP_CSUM;

	if (feat->offload.tx &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)
		dev_features |= NETIF_F_IPV6_CSUM;

	if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
		dev_features |= NETIF_F_TSO;

	if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK)
		dev_features |= NETIF_F_TSO6;

	if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK)
		dev_features |= NETIF_F_TSO_ECN;

	if (feat->offload.rx_supported &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
		dev_features |= NETIF_F_RXCSUM;

	if (feat->offload.rx_supported &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK)
		dev_features |= NETIF_F_RXCSUM;

	netdev->features =
		dev_features |
		NETIF_F_SG |
		NETIF_F_RXHASH |
		NETIF_F_HIGHDMA;

	netdev->hw_features |= netdev->features;
	netdev->vlan_features |= netdev->features;
}

static void ena_set_conf_feat_params(struct ena_adapter *adapter,
				     struct ena_com_dev_get_features_ctx *feat)
{
	struct net_device *netdev = adapter->netdev;

	/* Copy mac address */
	if (!is_valid_ether_addr(feat->dev_attr.mac_addr)) {
		eth_hw_addr_random(netdev);
		ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
	} else {
		ether_addr_copy(adapter->mac_addr, feat->dev_attr.mac_addr);
		ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
	}

	/* Set offload features */
	ena_set_dev_offloads(feat, netdev);

	adapter->max_mtu = feat->dev_attr.max_mtu;
	netdev->max_mtu = adapter->max_mtu;
	netdev->min_mtu = ENA_MIN_MTU;
}

static int ena_rss_init_default(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	struct device *dev = &adapter->pdev->dev;
	u32 val;
	int rc, i;

	rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
	if (unlikely(rc)) {
		dev_err(dev, "Cannot init indirect table\n");
		goto err_rss_init;
	}

	for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
		val = ethtool_rxfh_indir_default(i, adapter->num_queues);
		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
						       ENA_IO_RXQ_IDX(val));
		if (unlikely(rc && (rc != -EOPNOTSUPP))) {
			dev_err(dev, "Cannot fill indirect table\n");
			goto err_fill_indir;
		}
	}

	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
					ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
	if (unlikely(rc && (rc != -EOPNOTSUPP))) {
		dev_err(dev, "Cannot fill hash function\n");
		goto err_fill_indir;
	}

	rc = ena_com_set_default_hash_ctrl(ena_dev);
	if (unlikely(rc && (rc != -EOPNOTSUPP))) {
		dev_err(dev, "Cannot fill hash control\n");
		goto err_fill_indir;
	}

	return 0;

err_fill_indir:
	ena_com_rss_destroy(ena_dev);
err_rss_init:

	return rc;
}

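/* ethtool_rxfh_indir_default(i, n) is simply i % n, so the default RSS
 * indirection table round-robins all ENA_RX_RSS_TABLE_SIZE entries across
 * the active Rx queues; userspace can later reshape the table with
 * "ethtool -X <iface> ...".
 */
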
static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
{
	int release_bars;

	if (ena_dev->mem_bar)
		devm_iounmap(&pdev->dev, ena_dev->mem_bar);

	if (ena_dev->reg_bar)
		devm_iounmap(&pdev->dev, ena_dev->reg_bar);

	release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
	pci_release_selected_regions(pdev, release_bars);
}

static int ena_calc_queue_size(struct pci_dev *pdev,
			       struct ena_com_dev *ena_dev,
			       u16 *max_tx_sgl_size,
			       u16 *max_rx_sgl_size,
			       struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	u32 queue_size = ENA_DEFAULT_RING_SIZE;

	queue_size = min_t(u32, queue_size,
			   get_feat_ctx->max_queues.max_cq_depth);
	queue_size = min_t(u32, queue_size,
			   get_feat_ctx->max_queues.max_sq_depth);

	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		queue_size = min_t(u32, queue_size,
				   get_feat_ctx->max_queues.max_llq_depth);

	queue_size = rounddown_pow_of_two(queue_size);

	if (unlikely(!queue_size)) {
		dev_err(&pdev->dev, "Invalid queue size\n");
		return -EFAULT;
	}

	*max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
				 get_feat_ctx->max_queues.max_packet_tx_descs);
	*max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
				 get_feat_ctx->max_queues.max_packet_rx_descs);

	return queue_size;
}

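/* Example: if the device reports max_sq_depth = 8192 but max_cq_depth =
 * 1000, the ring is first clamped to min(ENA_DEFAULT_RING_SIZE, 1000, 8192)
 * and then rounded down to a power of two, yielding 512 descriptors
 * (assuming the default ring size of 1024).
 */
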
/* ena_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ena_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ena_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 */
static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct ena_com_dev_get_features_ctx get_feat_ctx;
	static int version_printed;
	struct net_device *netdev;
	struct ena_adapter *adapter;
	struct ena_com_dev *ena_dev = NULL;
	static int adapters_found;
	int io_queue_num, bars, rc;
	int queue_size;
	u16 tx_sgl_size = 0;
	u16 rx_sgl_size = 0;
	bool wd_state;

	dev_dbg(&pdev->dev, "%s\n", __func__);

	if (version_printed++ == 0)
		dev_info(&pdev->dev, "%s", version);

	rc = pci_enable_device_mem(pdev);
	if (rc) {
		dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n");
		return rc;
	}

	pci_set_master(pdev);

	ena_dev = vzalloc(sizeof(*ena_dev));
	if (!ena_dev) {
		rc = -ENOMEM;
		goto err_disable_device;
	}

	bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
	rc = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
	if (rc) {
		dev_err(&pdev->dev, "pci_request_selected_regions failed %d\n",
			rc);
		goto err_free_ena_dev;
	}

	ena_dev->reg_bar = devm_ioremap(&pdev->dev,
					pci_resource_start(pdev, ENA_REG_BAR),
					pci_resource_len(pdev, ENA_REG_BAR));
	if (!ena_dev->reg_bar) {
		dev_err(&pdev->dev, "failed to remap regs bar\n");
		rc = -EFAULT;
		goto err_free_region;
	}

	ena_dev->dmadev = &pdev->dev;

	rc = ena_device_init(ena_dev, pdev, &get_feat_ctx, &wd_state);
	if (rc) {
		dev_err(&pdev->dev, "ena device init failed\n");
		goto err_free_region;
	}

	ena_set_push_mode(pdev, ena_dev, &get_feat_ctx);

	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
						   pci_resource_start(pdev, ENA_MEM_BAR),
						   pci_resource_len(pdev, ENA_MEM_BAR));
		if (!ena_dev->mem_bar) {
			rc = -EFAULT;
			goto err_device_destroy;
		}
	}

	/* initial Tx interrupt delay; assumes 1 usec granularity.
	 * Updated during device initialization with the real granularity
	 */
	ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS;
	io_queue_num = ena_calc_io_queue_num(pdev, ena_dev, &get_feat_ctx);
	queue_size = ena_calc_queue_size(pdev, ena_dev, &tx_sgl_size,
					 &rx_sgl_size, &get_feat_ctx);
	if ((queue_size <= 0) || (io_queue_num <= 0)) {
		rc = -EFAULT;
		goto err_device_destroy;
	}

	dev_info(&pdev->dev, "creating %d io queues. queue size: %d\n",
		 io_queue_num, queue_size);

	/* dev zeroed in init_etherdev */
	netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), io_queue_num);
	if (!netdev) {
		dev_err(&pdev->dev, "alloc_etherdev_mq failed\n");
		rc = -ENOMEM;
		goto err_device_destroy;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	adapter = netdev_priv(netdev);
	pci_set_drvdata(pdev, adapter);

	adapter->ena_dev = ena_dev;
	adapter->netdev = netdev;
	adapter->pdev = pdev;

	ena_set_conf_feat_params(adapter, &get_feat_ctx);

	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
	adapter->reset_reason = ENA_REGS_RESET_NORMAL;

	adapter->tx_ring_size = queue_size;
	adapter->rx_ring_size = queue_size;

	adapter->max_tx_sgl_size = tx_sgl_size;
	adapter->max_rx_sgl_size = rx_sgl_size;

	adapter->num_queues = io_queue_num;
	adapter->last_monitored_tx_qid = 0;

	adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK;
	adapter->wd_state = wd_state;

	snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", adapters_found);

	rc = ena_com_init_interrupt_moderation(adapter->ena_dev);
	if (rc) {
		dev_err(&pdev->dev,
			"Failed to query interrupt moderation feature\n");
		goto err_netdev_destroy;
	}
	ena_init_io_rings(adapter);

	netdev->netdev_ops = &ena_netdev_ops;
	netdev->watchdog_timeo = TX_TIMEOUT;
	ena_set_ethtool_ops(netdev);

	netdev->priv_flags |= IFF_UNICAST_FLT;

	u64_stats_init(&adapter->syncp);

	rc = ena_enable_msix_and_set_admin_interrupts(adapter, io_queue_num);
	if (rc) {
		dev_err(&pdev->dev,
			"Failed to enable and set the admin interrupts\n");
		goto err_worker_destroy;
	}
	rc = ena_rss_init_default(adapter);
	if (rc && (rc != -EOPNOTSUPP)) {
		dev_err(&pdev->dev, "Cannot init RSS rc: %d\n", rc);
		goto err_free_msix;
	}

	ena_config_debug_area(adapter);

	memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len);

	netif_carrier_off(netdev);

	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto err_rss;
	}

	INIT_WORK(&adapter->reset_task, ena_fw_reset_device);

	adapter->last_keep_alive_jiffies = jiffies;
	adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT;
	adapter->missing_tx_completion_to = TX_TIMEOUT;
	adapter->missing_tx_completion_threshold = MAX_NUM_OF_TIMEOUTED_PACKETS;

	ena_update_hints(adapter, &get_feat_ctx.hw_hints);

	timer_setup(&adapter->timer_service, ena_timer_service, 0);
	mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));

	dev_info(&pdev->dev, "%s found at mem %lx, mac addr %pM Queues %d\n",
		 DEVICE_NAME, (long)pci_resource_start(pdev, 0),
		 netdev->dev_addr, io_queue_num);

	set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);

	adapters_found++;

	return 0;

err_rss:
	ena_com_delete_debug_area(ena_dev);
	ena_com_rss_destroy(ena_dev);
err_free_msix:
	ena_com_dev_reset(ena_dev, ENA_REGS_RESET_INIT_ERR);
	ena_free_mgmnt_irq(adapter);
	ena_disable_msix(adapter);
err_worker_destroy:
	ena_com_destroy_interrupt_moderation(ena_dev);
	del_timer(&adapter->timer_service);
err_netdev_destroy:
	free_netdev(netdev);
err_device_destroy:
	ena_com_delete_host_info(ena_dev);
	ena_com_admin_destroy(ena_dev);
err_free_region:
	ena_release_bars(ena_dev, pdev);
err_free_ena_dev:
	vfree(ena_dev);
err_disable_device:
	pci_disable_device(pdev);
	return rc;
}

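/* The error labels above unwind in reverse order of initialization: each
 * goto target releases everything acquired after the matching setup step,
 * so a failure at any point in ena_probe is intended to leave no PCI
 * region, MSI-X vector, netdev or ena_com resource behind.
 */
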
/*****************************************************************************/

/* ena_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ena_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void ena_remove(struct pci_dev *pdev)
{
	struct ena_adapter *adapter = pci_get_drvdata(pdev);
	struct ena_com_dev *ena_dev;
	struct net_device *netdev;

	ena_dev = adapter->ena_dev;
	netdev = adapter->netdev;

#ifdef CONFIG_RFS_ACCEL
	if ((adapter->msix_vecs >= 1) && (netdev->rx_cpu_rmap)) {
		free_irq_cpu_rmap(netdev->rx_cpu_rmap);
		netdev->rx_cpu_rmap = NULL;
	}
#endif /* CONFIG_RFS_ACCEL */
	del_timer_sync(&adapter->timer_service);

	cancel_work_sync(&adapter->reset_task);

	unregister_netdev(netdev);

	/* Reset the device only if the device is running. */
	if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
		ena_com_dev_reset(ena_dev, adapter->reset_reason);

	ena_free_mgmnt_irq(adapter);

	ena_disable_msix(adapter);

	free_netdev(netdev);

	ena_com_mmio_reg_read_request_destroy(ena_dev);

	ena_com_abort_admin_commands(ena_dev);

	ena_com_wait_for_abort_completion(ena_dev);

	ena_com_admin_destroy(ena_dev);

	ena_com_rss_destroy(ena_dev);

	ena_com_delete_debug_area(ena_dev);

	ena_com_delete_host_info(ena_dev);

	ena_release_bars(ena_dev, pdev);

	pci_disable_device(pdev);

	ena_com_destroy_interrupt_moderation(ena_dev);

	vfree(ena_dev);
}

/* ena_suspend - PM suspend callback
 * @pdev: PCI device information struct
 * @state: power state
 */
static int ena_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct ena_adapter *adapter = pci_get_drvdata(pdev);

	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.suspend++;
	u64_stats_update_end(&adapter->syncp);

	rtnl_lock();
	if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
		dev_err(&pdev->dev,
			"ignoring device reset request as the device is being suspended\n");
		clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
	}
	ena_destroy_device(adapter);
	rtnl_unlock();
	return 0;
}

/* ena_resume - PM resume callback
 * @pdev: PCI device information struct
 */
static int ena_resume(struct pci_dev *pdev)
{
	struct ena_adapter *adapter = pci_get_drvdata(pdev);
	int rc;

	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.resume++;
	u64_stats_update_end(&adapter->syncp);

	rtnl_lock();
	rc = ena_restore_device(adapter);
	rtnl_unlock();
	return rc;
}

static struct pci_driver ena_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= ena_pci_tbl,
	.probe		= ena_probe,
	.remove		= ena_remove,
#ifdef CONFIG_PM
	.suspend	= ena_suspend,
	.resume		= ena_resume,
#endif
	.sriov_configure = pci_sriov_configure_simple,
};

static int __init ena_init(void)
{
	pr_info("%s", version);

	ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME);
	if (!ena_wq) {
		pr_err("Failed to create workqueue\n");
		return -ENOMEM;
	}

	return pci_register_driver(&ena_pci_driver);
}

static void __exit ena_cleanup(void)
{
	pci_unregister_driver(&ena_pci_driver);

	if (ena_wq) {
		destroy_workqueue(ena_wq);
		ena_wq = NULL;
	}
}

/******************************************************************************
 ******************************** AENQ Handlers *******************************
 *****************************************************************************/
/* ena_update_on_link_change:
 * Notify the network interface about the change in link status
 */
static void ena_update_on_link_change(void *adapter_data,
				      struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
	struct ena_admin_aenq_link_change_desc *aenq_desc =
		(struct ena_admin_aenq_link_change_desc *)aenq_e;
	int status = aenq_desc->flags &
		ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;

	if (status) {
		netdev_dbg(adapter->netdev, "%s\n", __func__);
		set_bit(ENA_FLAG_LINK_UP, &adapter->flags);
		if (!test_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags))
			netif_carrier_on(adapter->netdev);
	} else {
		clear_bit(ENA_FLAG_LINK_UP, &adapter->flags);
		netif_carrier_off(adapter->netdev);
	}
}

static void ena_keep_alive_wd(void *adapter_data,
			      struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
	struct ena_admin_aenq_keep_alive_desc *desc;
	u64 rx_drops;

	desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
	adapter->last_keep_alive_jiffies = jiffies;

	rx_drops = ((u64)desc->rx_drops_high << 32) | desc->rx_drops_low;

	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.rx_drops = rx_drops;
	u64_stats_update_end(&adapter->syncp);
}

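/* The keep-alive event carries the drop counter as two 32-bit halves;
 * e.g. rx_drops_high = 0x1 and rx_drops_low = 0x00000002 reassemble to
 * 0x100000002 (4294967298) dropped packets, so the counter survives
 * wraparound of the low word.
 */
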
static void ena_notification(void *adapter_data,
			     struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
	struct ena_admin_ena_hw_hints *hints;

	WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION,
	     "Invalid group(%x) expected %x\n",
	     aenq_e->aenq_common_desc.group,
	     ENA_ADMIN_NOTIFICATION);

	switch (aenq_e->aenq_common_desc.syndrom) {
	case ENA_ADMIN_UPDATE_HINTS:
		hints = (struct ena_admin_ena_hw_hints *)
			(&aenq_e->inline_data_w4);
		ena_update_hints(adapter, hints);
		break;
	default:
		netif_err(adapter, drv, adapter->netdev,
			  "Invalid aenq notification link state %d\n",
			  aenq_e->aenq_common_desc.syndrom);
	}
}

/* This handler will be called for unknown event group or unimplemented
 * handlers.
 */
static void unimplemented_aenq_handler(void *data,
				       struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)data;

	netif_err(adapter, drv, adapter->netdev,
		  "Unknown event was received or event with unimplemented handler\n");
}

static struct ena_aenq_handlers aenq_handlers = {
	.handlers = {
		[ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
		[ENA_ADMIN_NOTIFICATION] = ena_notification,
		[ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd,
	},
	.unimplemented_handler = unimplemented_aenq_handler
};

module_init(ena_init);
module_exit(ena_cleanup);