// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif /* CONFIG_RFS_ACCEL */
#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/numa.h>
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/version.h>
#include <linux/vmalloc.h>

#include "ena_netdev.h"
#include <linux/bpf_trace.h>
#include "ena_pci_id_tbl.h"

MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
MODULE_DESCRIPTION(DEVICE_NAME " device driver");
MODULE_LICENSE("GPL");
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5 * HZ)

#define ENA_MAX_RINGS min_t(unsigned int, ENA_MAX_NUM_IO_QUEUES, num_possible_cpus())

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \
		NETIF_MSG_TX_DONE | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)

static struct ena_aenq_handlers aenq_handlers;

static struct workqueue_struct *ena_wq;

MODULE_DEVICE_TABLE(pci, ena_pci_tbl);
static int ena_rss_init_default(struct ena_adapter *adapter);
static void check_for_admin_com_state(struct ena_adapter *adapter);
static void ena_destroy_device(struct ena_adapter *adapter, bool graceful);
static int ena_restore_device(struct ena_adapter *adapter);

static void ena_init_io_rings(struct ena_adapter *adapter,
			      int first_index, int count);
static void ena_init_napi_in_range(struct ena_adapter *adapter, int first_index,
				   int count);
static void ena_del_napi_in_range(struct ena_adapter *adapter, int first_index,
				  int count);
static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid);
static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
					   int first_index, int count);
static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid);
static void ena_free_tx_resources(struct ena_adapter *adapter, int qid);
static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget);
static void ena_destroy_all_tx_queues(struct ena_adapter *adapter);
static void ena_free_all_io_tx_resources(struct ena_adapter *adapter);
static void ena_napi_disable_in_range(struct ena_adapter *adapter,
				      int first_index, int count);
static void ena_napi_enable_in_range(struct ena_adapter *adapter,
				     int first_index, int count);
static int ena_up(struct ena_adapter *adapter);
static void ena_down(struct ena_adapter *adapter);
static void ena_unmask_interrupt(struct ena_ring *tx_ring,
				 struct ena_ring *rx_ring);
static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
				      struct ena_ring *rx_ring);
static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
			      struct ena_tx_buffer *tx_info);
static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
					    int first_index, int count);
/* Increase a stat by cnt while holding syncp seqlock on 32bit machines */
static void ena_increase_stat(u64 *statp, u64 cnt,
			      struct u64_stats_sync *syncp)
{
	u64_stats_update_begin(syncp);
	(*statp) += cnt;
	u64_stats_update_end(syncp);
}
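/* Note: on 64-bit kernels u64_stats_update_begin()/end() compile away to
 * nothing; the seqcount is only taken on 32-bit machines, where a 64-bit
 * counter update is not a single atomic store.
 */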
static void ena_ring_tx_doorbell(struct ena_ring *tx_ring)
{
	ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
	ena_increase_stat(&tx_ring->tx_stats.doorbells, 1, &tx_ring->syncp);
}
static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct ena_adapter *adapter = netdev_priv(dev);

	/* Change the state of the device to trigger reset.
	 * Check that we are not already in the middle of a reset or that a
	 * trigger has already been set.
	 */
	if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
		return;

	ena_reset_device(adapter, ENA_REGS_RESET_OS_NETDEV_WD);
	ena_increase_stat(&adapter->dev_stats.tx_timeout, 1, &adapter->syncp);

	netif_err(adapter, tx_err, dev, "Transmit time out\n");
}
static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++)
		adapter->rx_ring[i].mtu = mtu;
}
static int ena_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	int ret;

	ret = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
	if (!ret) {
		netif_dbg(adapter, drv, dev, "Set MTU to %d\n", new_mtu);
		update_rx_ring_mtu(adapter, new_mtu);
		dev->mtu = new_mtu;
	} else {
		netif_err(adapter, drv, dev, "Failed to set MTU to %d\n",
			  new_mtu);
	}

	return ret;
}
static int ena_xmit_common(struct net_device *dev,
			   struct ena_ring *ring,
			   struct ena_tx_buffer *tx_info,
			   struct ena_com_tx_ctx *ena_tx_ctx,
			   u16 next_to_use,
			   u32 bytes)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	int rc, nb_hw_desc;

	if (unlikely(ena_com_is_doorbell_needed(ring->ena_com_io_sq,
						ena_tx_ctx))) {
		netif_dbg(adapter, tx_queued, dev,
			  "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",
			  ring->qid);
		ena_ring_tx_doorbell(ring);
	}

	/* prepare the packet's descriptors to dma engine */
	rc = ena_com_prepare_tx(ring->ena_com_io_sq, ena_tx_ctx,
				&nb_hw_desc);

	/* In case there isn't enough space in the queue for the packet,
	 * we simply drop it. All other failure reasons of
	 * ena_com_prepare_tx() are fatal and therefore require a device reset.
	 */
	if (unlikely(rc)) {
		netif_err(adapter, tx_queued, dev,
			  "Failed to prepare tx bufs\n");
		ena_increase_stat(&ring->tx_stats.prepare_ctx_err, 1,
				  &ring->syncp);
		if (rc != -ENOMEM)
			ena_reset_device(adapter,
					 ENA_REGS_RESET_DRIVER_INVALID_STATE);
		return rc;
	}

	u64_stats_update_begin(&ring->syncp);
	ring->tx_stats.cnt++;
	ring->tx_stats.bytes += bytes;
	u64_stats_update_end(&ring->syncp);

	tx_info->tx_descs = nb_hw_desc;
	tx_info->last_jiffies = jiffies;
	tx_info->print_once = 0;

	ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
						 ring->ring_size);
	return 0;
}
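/* Note: the early doorbell at the top of ena_xmit_common() is an LLQ
 * (low-latency queue) detail: ena_com_is_doorbell_needed() reports that
 * the maximum burst size was reached, so the pending burst must be
 * flushed before more descriptors can be prepared.
 */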
/* This is the XDP napi callback. XDP queues use a separate napi callback
 * from Rx/Tx queues.
 */
static int ena_xdp_io_poll(struct napi_struct *napi, int budget)
{
	struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
	u32 xdp_work_done, xdp_budget;
	struct ena_ring *xdp_ring;
	int napi_comp_call = 0;
	int ret;

	xdp_ring = ena_napi->xdp_ring;

	xdp_budget = budget;

	if (!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags) ||
	    test_bit(ENA_FLAG_TRIGGER_RESET, &xdp_ring->adapter->flags)) {
		napi_complete_done(napi, 0);
		return 0;
	}

	xdp_work_done = ena_clean_xdp_irq(xdp_ring, xdp_budget);

	/* If the device is about to reset or down, avoid unmask
	 * the interrupt and return 0 so NAPI won't reschedule
	 */
	if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags))) {
		napi_complete_done(napi, 0);
		ret = 0;
	} else if (xdp_budget > xdp_work_done) {
		napi_comp_call = 1;
		if (napi_complete_done(napi, xdp_work_done))
			ena_unmask_interrupt(xdp_ring, NULL);
		ena_update_ring_numa_node(xdp_ring, NULL);
		ret = xdp_work_done;
	} else {
		ret = xdp_budget;
	}

	u64_stats_update_begin(&xdp_ring->syncp);
	xdp_ring->tx_stats.napi_comp += napi_comp_call;
	xdp_ring->tx_stats.tx_poll++;
	u64_stats_update_end(&xdp_ring->syncp);
	xdp_ring->tx_stats.last_napi_jiffies = jiffies;

	return ret;
}
static int ena_xdp_tx_map_frame(struct ena_ring *xdp_ring,
				struct ena_tx_buffer *tx_info,
				struct xdp_frame *xdpf,
				struct ena_com_tx_ctx *ena_tx_ctx)
{
	struct ena_adapter *adapter = xdp_ring->adapter;
	struct ena_com_buf *ena_buf;
	int push_len = 0;
	dma_addr_t dma;
	void *data;
	u32 size;

	tx_info->xdpf = xdpf;
	data = tx_info->xdpf->data;
	size = tx_info->xdpf->len;

	if (xdp_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		/* Designate part of the packet for LLQ */
		push_len = min_t(u32, size, xdp_ring->tx_max_header_size);

		ena_tx_ctx->push_header = data;

		size -= push_len;
		data += push_len;
	}

	ena_tx_ctx->header_len = push_len;

	if (size > 0) {
		dma = dma_map_single(xdp_ring->dev,
				     data,
				     size,
				     DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(xdp_ring->dev, dma)))
			goto error_report_dma_error;

		tx_info->map_linear_data = 0;

		ena_buf = tx_info->bufs;
		ena_buf->paddr = dma;
		ena_buf->len = size;

		ena_tx_ctx->ena_bufs = ena_buf;
		ena_tx_ctx->num_bufs = tx_info->num_of_bufs = 1;
	}

	return 0;

error_report_dma_error:
	ena_increase_stat(&xdp_ring->tx_stats.dma_mapping_err, 1,
			  &xdp_ring->syncp);
	netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map xdp buff\n");

	return -EINVAL;
}
static int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
			      struct net_device *dev,
			      struct xdp_frame *xdpf,
			      int flags)
{
	struct ena_com_tx_ctx ena_tx_ctx = {};
	struct ena_tx_buffer *tx_info;
	u16 next_to_use, req_id;
	int rc;

	next_to_use = xdp_ring->next_to_use;
	req_id = xdp_ring->free_ids[next_to_use];
	tx_info = &xdp_ring->tx_buffer_info[req_id];
	tx_info->num_of_bufs = 0;

	rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &ena_tx_ctx);
	if (unlikely(rc))
		return rc;

	ena_tx_ctx.req_id = req_id;

	rc = ena_xmit_common(dev,
			     xdp_ring,
			     tx_info,
			     &ena_tx_ctx,
			     next_to_use,
			     xdpf->len);
	if (rc)
		goto error_unmap_dma;

	/* trigger the dma engine. ena_ring_tx_doorbell()
	 * calls a memory barrier inside it.
	 */
	if (flags & XDP_XMIT_FLUSH)
		ena_ring_tx_doorbell(xdp_ring);

	return rc;

error_unmap_dma:
	ena_unmap_tx_buff(xdp_ring, tx_info);
	tx_info->xdpf = NULL;
	return rc;
}
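/* Note: callers of ena_xdp_xmit_frame() are expected to hold
 * xdp_ring->xdp_tx_lock (see ena_xdp_xmit() and ena_xdp_execute()),
 * since XDP_TX and XDP_REDIRECT may target the same ring from different
 * CPUs.
 */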
static int ena_xdp_xmit(struct net_device *dev, int n,
			struct xdp_frame **frames, u32 flags)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	struct ena_ring *xdp_ring;
	int qid, i, nxmit = 0;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
		return -ENETDOWN;

	/* We assume that all rings have the same XDP program */
	if (!READ_ONCE(adapter->rx_ring->xdp_bpf_prog))
		return -ENXIO;

	qid = smp_processor_id() % adapter->xdp_num_queues;
	qid += adapter->xdp_first_ring;
	xdp_ring = &adapter->tx_ring[qid];

	/* Other CPU ids might try to send through this queue */
	spin_lock(&xdp_ring->xdp_tx_lock);

	for (i = 0; i < n; i++) {
		if (ena_xdp_xmit_frame(xdp_ring, dev, frames[i], 0))
			break;
		nxmit++;
	}

	/* Ring doorbell to make device aware of the packets */
	if (flags & XDP_XMIT_FLUSH)
		ena_ring_tx_doorbell(xdp_ring);

	spin_unlock(&xdp_ring->xdp_tx_lock);

	/* Return number of packets sent */
	return nxmit;
}
static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
{
	u32 verdict = ENA_XDP_PASS;
	struct bpf_prog *xdp_prog;
	struct ena_ring *xdp_ring;
	struct xdp_frame *xdpf;
	u64 *xdp_stat;

	xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog);

	if (!xdp_prog)
		goto out;

	verdict = bpf_prog_run_xdp(xdp_prog, xdp);

	switch (verdict) {
	case XDP_TX:
		xdpf = xdp_convert_buff_to_frame(xdp);
		if (unlikely(!xdpf)) {
			trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
			xdp_stat = &rx_ring->rx_stats.xdp_aborted;
			verdict = ENA_XDP_DROP;
			break;
		}

		/* Find xmit queue */
		xdp_ring = rx_ring->xdp_ring;

		/* The XDP queues are shared between XDP_TX and XDP_REDIRECT */
		spin_lock(&xdp_ring->xdp_tx_lock);

		if (ena_xdp_xmit_frame(xdp_ring, rx_ring->netdev, xdpf,
				       XDP_XMIT_FLUSH))
			xdp_return_frame(xdpf);

		spin_unlock(&xdp_ring->xdp_tx_lock);
		xdp_stat = &rx_ring->rx_stats.xdp_tx;
		verdict = ENA_XDP_TX;
		break;
	case XDP_REDIRECT:
		if (likely(!xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))) {
			xdp_stat = &rx_ring->rx_stats.xdp_redirect;
			verdict = ENA_XDP_REDIRECT;
			break;
		}
		trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
		xdp_stat = &rx_ring->rx_stats.xdp_aborted;
		verdict = ENA_XDP_DROP;
		break;
	case XDP_ABORTED:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
		xdp_stat = &rx_ring->rx_stats.xdp_aborted;
		verdict = ENA_XDP_DROP;
		break;
	case XDP_DROP:
		xdp_stat = &rx_ring->rx_stats.xdp_drop;
		verdict = ENA_XDP_DROP;
		break;
	case XDP_PASS:
		xdp_stat = &rx_ring->rx_stats.xdp_pass;
		verdict = ENA_XDP_PASS;
		break;
	default:
		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, verdict);
		xdp_stat = &rx_ring->rx_stats.xdp_invalid;
		verdict = ENA_XDP_DROP;
	}

	ena_increase_stat(xdp_stat, 1, &rx_ring->syncp);
out:
	return verdict;
}
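/* Note: the internal ENA_XDP_* verdicts mirror the bpf XDP_* actions but
 * are used as bit flags (see the ENA_XDP_FORWARDED test in
 * ena_clean_rx_irq()), which lets the poll loop OR verdicts into
 * xdp_flags and flush redirected frames once per poll rather than per
 * packet.
 */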
static void ena_init_all_xdp_queues(struct ena_adapter *adapter)
{
	adapter->xdp_first_ring = adapter->num_io_queues;
	adapter->xdp_num_queues = adapter->num_io_queues;

	ena_init_io_rings(adapter,
			  adapter->xdp_first_ring,
			  adapter->xdp_num_queues);
}
static int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter)
{
	int rc = 0;

	rc = ena_setup_tx_resources_in_range(adapter, adapter->xdp_first_ring,
					     adapter->xdp_num_queues);
	if (rc)
		goto setup_err;

	rc = ena_create_io_tx_queues_in_range(adapter,
					      adapter->xdp_first_ring,
					      adapter->xdp_num_queues);
	if (rc)
		goto create_err;

	return 0;

create_err:
	ena_free_all_io_tx_resources(adapter);
setup_err:
	return rc;
}
/* Provides a way for both kernel and bpf-prog to know
 * more about the RX-queue a given XDP frame arrived on.
 */
static int ena_xdp_register_rxq_info(struct ena_ring *rx_ring)
{
	int rc;

	rc = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, rx_ring->qid, 0);

	if (rc) {
		netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
			  "Failed to register xdp rx queue info. RX queue num %d rc: %d\n",
			  rx_ring->qid, rc);
		goto err;
	}

	rc = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, MEM_TYPE_PAGE_SHARED,
					NULL);

	if (rc) {
		netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
			  "Failed to register xdp rx queue info memory model. RX queue num %d rc: %d\n",
			  rx_ring->qid, rc);
		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	}

err:
	return rc;
}

static void ena_xdp_unregister_rxq_info(struct ena_ring *rx_ring)
{
	xdp_rxq_info_unreg_mem_model(&rx_ring->xdp_rxq);
	xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
}
static void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
						 struct bpf_prog *prog,
						 int first, int count)
{
	struct bpf_prog *old_bpf_prog;
	struct ena_ring *rx_ring;
	int i = 0;

	for (i = first; i < count; i++) {
		rx_ring = &adapter->rx_ring[i];
		old_bpf_prog = xchg(&rx_ring->xdp_bpf_prog, prog);

		if (!old_bpf_prog && prog) {
			ena_xdp_register_rxq_info(rx_ring);
			rx_ring->rx_headroom = XDP_PACKET_HEADROOM;
		} else if (old_bpf_prog && !prog) {
			ena_xdp_unregister_rxq_info(rx_ring);
			rx_ring->rx_headroom = NET_SKB_PAD;
		}
	}
}
static void ena_xdp_exchange_program(struct ena_adapter *adapter,
				     struct bpf_prog *prog)
{
	struct bpf_prog *old_bpf_prog = xchg(&adapter->xdp_bpf_prog, prog);

	ena_xdp_exchange_program_rx_in_range(adapter,
					     prog,
					     0,
					     adapter->num_io_queues);

	if (old_bpf_prog)
		bpf_prog_put(old_bpf_prog);
}
static int ena_destroy_and_free_all_xdp_queues(struct ena_adapter *adapter)
{
	bool was_up;
	int rc;

	was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);

	if (was_up)
		ena_down(adapter);

	adapter->xdp_first_ring = 0;
	adapter->xdp_num_queues = 0;
	ena_xdp_exchange_program(adapter, NULL);
	if (was_up) {
		rc = ena_up(adapter);
		if (rc)
			return rc;
	}
	return 0;
}
static int ena_xdp_set(struct net_device *netdev, struct netdev_bpf *bpf)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	struct bpf_prog *prog = bpf->prog;
	struct bpf_prog *old_bpf_prog;
	int rc, prev_mtu;
	bool is_up;

	is_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
	rc = ena_xdp_allowed(adapter);
	if (rc == ENA_XDP_ALLOWED) {
		old_bpf_prog = adapter->xdp_bpf_prog;
		if (prog) {
			if (!is_up) {
				ena_init_all_xdp_queues(adapter);
			} else if (!old_bpf_prog) {
				ena_down(adapter);
				ena_init_all_xdp_queues(adapter);
			}
			ena_xdp_exchange_program(adapter, prog);

			if (is_up && !old_bpf_prog) {
				rc = ena_up(adapter);
				if (rc)
					return rc;
			}
			xdp_features_set_redirect_target(netdev, false);
		} else if (old_bpf_prog) {
			xdp_features_clear_redirect_target(netdev);
			rc = ena_destroy_and_free_all_xdp_queues(adapter);
			if (rc)
				return rc;
		}

		prev_mtu = netdev->max_mtu;
		netdev->max_mtu = prog ? ENA_XDP_MAX_MTU : adapter->max_mtu;

		if (!old_bpf_prog)
			netif_info(adapter, drv, adapter->netdev,
				   "XDP program is set, changing the max_mtu from %d to %d",
				   prev_mtu, netdev->max_mtu);

	} else if (rc == ENA_XDP_CURRENT_MTU_TOO_LARGE) {
		netif_err(adapter, drv, adapter->netdev,
			  "Failed to set xdp program, the current MTU (%d) is larger than the maximum allowed MTU (%lu) while xdp is on",
			  netdev->mtu, ENA_XDP_MAX_MTU);
		NL_SET_ERR_MSG_MOD(bpf->extack,
				   "Failed to set xdp program, the current MTU is larger than the maximum allowed MTU. Check the dmesg for more info");
		return -EINVAL;
	} else if (rc == ENA_XDP_NO_ENOUGH_QUEUES) {
		netif_err(adapter, drv, adapter->netdev,
			  "Failed to set xdp program, the Rx/Tx channel count should be at most half of the maximum allowed channel count. The current queue count (%d), the maximal queue count (%d)\n",
			  adapter->num_io_queues, adapter->max_num_io_queues);
		NL_SET_ERR_MSG_MOD(bpf->extack,
				   "Failed to set xdp program, there is no enough space for allocating XDP queues, Check the dmesg for more info");
		return -EINVAL;
	}

	return 0;
}
/* This is the main xdp callback, it's used by the kernel to set/unset the xdp
 * program as well as to query the current xdp program id.
 */
static int ena_xdp(struct net_device *netdev, struct netdev_bpf *bpf)
{
	switch (bpf->command) {
	case XDP_SETUP_PROG:
		return ena_xdp_set(netdev, bpf);
	default:
		return -EINVAL;
	}
	return 0;
}
static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
{
#ifdef CONFIG_RFS_ACCEL
	u32 i;
	int rc;

	adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_io_queues);
	if (!adapter->netdev->rx_cpu_rmap)
		return -ENOMEM;

	for (i = 0; i < adapter->num_io_queues; i++) {
		int irq_idx = ENA_IO_IRQ_IDX(i);

		rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
				      pci_irq_vector(adapter->pdev, irq_idx));
		if (rc) {
			free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
			adapter->netdev->rx_cpu_rmap = NULL;
			return rc;
		}
	}
#endif /* CONFIG_RFS_ACCEL */
	return 0;
}
static void ena_init_io_rings_common(struct ena_adapter *adapter,
				     struct ena_ring *ring, u16 qid)
{
	ring->qid = qid;
	ring->pdev = adapter->pdev;
	ring->dev = &adapter->pdev->dev;
	ring->netdev = adapter->netdev;
	ring->napi = &adapter->ena_napi[qid].napi;
	ring->adapter = adapter;
	ring->ena_dev = adapter->ena_dev;
	ring->per_napi_packets = 0;
	ring->cpu = 0;
	ring->numa_node = 0;
	ring->no_interrupt_event_cnt = 0;
	u64_stats_init(&ring->syncp);
}
static void ena_init_io_rings(struct ena_adapter *adapter,
			      int first_index, int count)
{
	struct ena_com_dev *ena_dev;
	struct ena_ring *txr, *rxr;
	int i;

	ena_dev = adapter->ena_dev;

	for (i = first_index; i < first_index + count; i++) {
		txr = &adapter->tx_ring[i];
		rxr = &adapter->rx_ring[i];

		/* TX common ring state */
		ena_init_io_rings_common(adapter, txr, i);

		/* TX specific ring state */
		txr->ring_size = adapter->requested_tx_ring_size;
		txr->tx_max_header_size = ena_dev->tx_max_header_size;
		txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
		txr->sgl_size = adapter->max_tx_sgl_size;
		txr->smoothed_interval =
			ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);
		txr->disable_meta_caching = adapter->disable_meta_caching;
		spin_lock_init(&txr->xdp_tx_lock);

		/* Don't init RX queues for xdp queues */
		if (!ENA_IS_XDP_INDEX(adapter, i)) {
			/* RX common ring state */
			ena_init_io_rings_common(adapter, rxr, i);

			/* RX specific ring state */
			rxr->ring_size = adapter->requested_rx_ring_size;
			rxr->rx_copybreak = adapter->rx_copybreak;
			rxr->sgl_size = adapter->max_rx_sgl_size;
			rxr->smoothed_interval =
				ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
			rxr->empty_rx_queue = 0;
			rxr->rx_headroom = NET_SKB_PAD;
			adapter->ena_napi[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
			rxr->xdp_ring = &adapter->tx_ring[i + adapter->num_io_queues];
		}
	}
}
/* ena_setup_tx_resources - allocate I/O Tx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];
	struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
	int size, i, node;

	if (tx_ring->tx_buffer_info) {
		netif_err(adapter, ifup,
			  adapter->netdev, "tx_buffer_info info is not NULL");
		return -EEXIST;
	}

	size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
	node = cpu_to_node(ena_irq->cpu);

	tx_ring->tx_buffer_info = vzalloc_node(size, node);
	if (!tx_ring->tx_buffer_info) {
		tx_ring->tx_buffer_info = vzalloc(size);
		if (!tx_ring->tx_buffer_info)
			goto err_tx_buffer_info;
	}

	size = sizeof(u16) * tx_ring->ring_size;
	tx_ring->free_ids = vzalloc_node(size, node);
	if (!tx_ring->free_ids) {
		tx_ring->free_ids = vzalloc(size);
		if (!tx_ring->free_ids)
			goto err_tx_free_ids;
	}

	size = tx_ring->tx_max_header_size;
	tx_ring->push_buf_intermediate_buf = vzalloc_node(size, node);
	if (!tx_ring->push_buf_intermediate_buf) {
		tx_ring->push_buf_intermediate_buf = vzalloc(size);
		if (!tx_ring->push_buf_intermediate_buf)
			goto err_push_buf_intermediate_buf;
	}

	/* Req id ring for TX out of order completions */
	for (i = 0; i < tx_ring->ring_size; i++)
		tx_ring->free_ids[i] = i;

	/* Reset tx statistics */
	memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats));

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->cpu = ena_irq->cpu;
	tx_ring->numa_node = node;
	return 0;

err_push_buf_intermediate_buf:
	vfree(tx_ring->free_ids);
	tx_ring->free_ids = NULL;
err_tx_free_ids:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
err_tx_buffer_info:
	return -ENOMEM;
}
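/* Note: the vzalloc_node()/vzalloc() pairs above first try to place the
 * ring bookkeeping on the NUMA node of the queue's IRQ and only then
 * fall back to any node, rather than failing the allocation outright.
 */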
/* ena_free_tx_resources - Free I/O Tx Resources per Queue
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all transmit software resources
 */
static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	vfree(tx_ring->free_ids);
	tx_ring->free_ids = NULL;

	vfree(tx_ring->push_buf_intermediate_buf);
	tx_ring->push_buf_intermediate_buf = NULL;
}
static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
					   int first_index,
					   int count)
{
	int i, rc = 0;

	for (i = first_index; i < first_index + count; i++) {
		rc = ena_setup_tx_resources(adapter, i);
		if (rc)
			goto err_setup_tx;
	}

	return 0;

err_setup_tx:

	netif_err(adapter, ifup, adapter->netdev,
		  "Tx queue %d: allocation failed\n", i);

	/* rewind the index freeing the rings as we go */
	while (first_index < i--)
		ena_free_tx_resources(adapter, i);
	return rc;
}
static void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
						  int first_index, int count)
{
	int i;

	for (i = first_index; i < first_index + count; i++)
		ena_free_tx_resources(adapter, i);
}

/* ena_free_all_io_tx_resources - Free I/O Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 */
static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
{
	ena_free_all_io_tx_resources_in_range(adapter,
					      0,
					      adapter->xdp_num_queues +
					      adapter->num_io_queues);
}
/* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Returns 0 on success, negative on failure
 */
static int ena_setup_rx_resources(struct ena_adapter *adapter,
				  u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
	struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
	int size, node, i;

	if (rx_ring->rx_buffer_info) {
		netif_err(adapter, ifup, adapter->netdev,
			  "rx_buffer_info is not NULL");
		return -EEXIST;
	}

	/* alloc extra element so in rx path
	 * we can always prefetch rx_info + 1
	 */
	size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1);
	node = cpu_to_node(ena_irq->cpu);

	rx_ring->rx_buffer_info = vzalloc_node(size, node);
	if (!rx_ring->rx_buffer_info) {
		rx_ring->rx_buffer_info = vzalloc(size);
		if (!rx_ring->rx_buffer_info)
			return -ENOMEM;
	}

	size = sizeof(u16) * rx_ring->ring_size;
	rx_ring->free_ids = vzalloc_node(size, node);
	if (!rx_ring->free_ids) {
		rx_ring->free_ids = vzalloc(size);
		if (!rx_ring->free_ids) {
			vfree(rx_ring->rx_buffer_info);
			rx_ring->rx_buffer_info = NULL;
			return -ENOMEM;
		}
	}

	/* Req id ring for receiving RX pkts out of order */
	for (i = 0; i < rx_ring->ring_size; i++)
		rx_ring->free_ids[i] = i;

	/* Reset rx statistics */
	memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats));

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	rx_ring->cpu = ena_irq->cpu;
	rx_ring->numa_node = node;

	return 0;
}
/* ena_free_rx_resources - Free I/O Rx Resources
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all receive software resources
 */
static void ena_free_rx_resources(struct ena_adapter *adapter,
				  u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	vfree(rx_ring->free_ids);
	rx_ring->free_ids = NULL;
}
/* ena_setup_all_rx_resources - allocate I/O Rx queues resources for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_all_rx_resources(struct ena_adapter *adapter)
{
	int i, rc = 0;

	for (i = 0; i < adapter->num_io_queues; i++) {
		rc = ena_setup_rx_resources(adapter, i);
		if (rc)
			goto err_setup_rx;
	}

	return 0;

err_setup_rx:

	netif_err(adapter, ifup, adapter->netdev,
		  "Rx queue %d: allocation failed\n", i);

	/* rewind the index freeing the rings as we go */
	while (i--)
		ena_free_rx_resources(adapter, i);
	return rc;
}
/* ena_free_all_io_rx_resources - Free I/O Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 */
static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++)
		ena_free_rx_resources(adapter, i);
}
static struct page *ena_alloc_map_page(struct ena_ring *rx_ring,
				       dma_addr_t *dma)
{
	struct page *page;

	/* This would allocate the page on the same NUMA node the executing code
	 * is running on.
	 */
	page = dev_alloc_page();
	if (!page) {
		ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1,
				  &rx_ring->syncp);
		return ERR_PTR(-ENOSPC);
	}

	/* To enable NIC-side port-mirroring, AKA SPAN port,
	 * we make the buffer readable from the nic as well
	 */
	*dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
			    DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(rx_ring->dev, *dma))) {
		ena_increase_stat(&rx_ring->rx_stats.dma_mapping_err, 1,
				  &rx_ring->syncp);
		__free_page(page);
		return ERR_PTR(-EIO);
	}

	return page;
}
static int ena_alloc_rx_buffer(struct ena_ring *rx_ring,
			       struct ena_rx_buffer *rx_info)
{
	int headroom = rx_ring->rx_headroom;
	struct ena_com_buf *ena_buf;
	struct page *page;
	dma_addr_t dma;
	int tailroom;

	/* restore page offset value in case it has been changed by device */
	rx_info->buf_offset = headroom;

	/* if previous allocated page is not used */
	if (unlikely(rx_info->page))
		return 0;

	/* We handle DMA here */
	page = ena_alloc_map_page(rx_ring, &dma);
	if (unlikely(IS_ERR(page)))
		return PTR_ERR(page);

	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "Allocate page %p, rx_info %p\n", page, rx_info);

	tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	rx_info->page = page;
	rx_info->dma_addr = dma;
	rx_info->page_offset = 0;
	ena_buf = &rx_info->ena_buf;
	ena_buf->paddr = dma + headroom;
	ena_buf->len = ENA_PAGE_SIZE - headroom - tailroom;

	return 0;
}
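/* Note: the RX buffer layout within the page is
 *   [headroom][packet data][tailroom for struct skb_shared_info]
 * so ena_buf->paddr is advanced past the headroom and ena_buf->len
 * excludes both reserved regions, leaving room for XDP head adjustment
 * and for napi_build_skb().
 */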
static void ena_unmap_rx_buff_attrs(struct ena_ring *rx_ring,
				    struct ena_rx_buffer *rx_info,
				    unsigned long attrs)
{
	dma_unmap_page_attrs(rx_ring->dev, rx_info->dma_addr, ENA_PAGE_SIZE,
			     DMA_BIDIRECTIONAL, attrs);
}

static void ena_free_rx_page(struct ena_ring *rx_ring,
			     struct ena_rx_buffer *rx_info)
{
	struct page *page = rx_info->page;

	if (unlikely(!page)) {
		netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
			   "Trying to free unallocated buffer\n");
		return;
	}

	ena_unmap_rx_buff_attrs(rx_ring, rx_info, 0);

	__free_page(page);
	rx_info->page = NULL;
}
static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
{
	u16 next_to_use, req_id;
	u32 i;
	int rc;

	next_to_use = rx_ring->next_to_use;

	for (i = 0; i < num; i++) {
		struct ena_rx_buffer *rx_info;

		req_id = rx_ring->free_ids[next_to_use];

		rx_info = &rx_ring->rx_buffer_info[req_id];

		rc = ena_alloc_rx_buffer(rx_ring, rx_info);
		if (unlikely(rc < 0)) {
			netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
				   "Failed to allocate buffer for rx queue %d\n",
				   rx_ring->qid);
			break;
		}
		rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
						&rx_info->ena_buf,
						req_id);
		if (unlikely(rc)) {
			netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
				   "Failed to add buffer for rx queue %d\n",
				   rx_ring->qid);
			break;
		}
		next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
						   rx_ring->ring_size);
	}

	if (unlikely(i < num)) {
		ena_increase_stat(&rx_ring->rx_stats.refil_partial, 1,
				  &rx_ring->syncp);
		netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
			   "Refilled rx qid %d with only %d buffers (from %d)\n",
			   rx_ring->qid, i, num);
	}

	/* ena_com_write_sq_doorbell issues a wmb() */
	if (likely(i))
		ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);

	rx_ring->next_to_use = next_to_use;

	return i;
}
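/* Note: the doorbell is rung only when at least one descriptor was
 * queued; ena_com_write_sq_doorbell() issues the wmb() that orders the
 * descriptor writes before the device observes the new tail pointer.
 */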
static void ena_free_rx_bufs(struct ena_adapter *adapter,
			     u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
	u32 i;

	for (i = 0; i < rx_ring->ring_size; i++) {
		struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];

		if (rx_info->page)
			ena_free_rx_page(rx_ring, rx_info);
	}
}
/* ena_refill_all_rx_bufs - allocate all queues Rx buffers
 * @adapter: board private structure
 */
static void ena_refill_all_rx_bufs(struct ena_adapter *adapter)
{
	struct ena_ring *rx_ring;
	int i, rc, bufs_num;

	for (i = 0; i < adapter->num_io_queues; i++) {
		rx_ring = &adapter->rx_ring[i];
		bufs_num = rx_ring->ring_size - 1;
		rc = ena_refill_rx_bufs(rx_ring, bufs_num);

		if (unlikely(rc != bufs_num))
			netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
				   "Refilling Queue %d failed. allocated %d buffers from: %d\n",
				   i, rc, bufs_num);
	}
}

static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++)
		ena_free_rx_bufs(adapter, i);
}
static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
			      struct ena_tx_buffer *tx_info)
{
	struct ena_com_buf *ena_buf;
	u32 cnt;
	int i;

	ena_buf = tx_info->bufs;
	cnt = tx_info->num_of_bufs;

	if (unlikely(!cnt))
		return;

	if (tx_info->map_linear_data) {
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(ena_buf, paddr),
				 dma_unmap_len(ena_buf, len),
				 DMA_TO_DEVICE);
		ena_buf++;
		cnt--;
	}

	/* unmap remaining mapped pages */
	for (i = 0; i < cnt; i++) {
		dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
			       dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
		ena_buf++;
	}
}
/* ena_free_tx_bufs - Free Tx Buffers per Queue
 * @tx_ring: TX ring for which buffers are to be freed
 */
static void ena_free_tx_bufs(struct ena_ring *tx_ring)
{
	bool print_once = true;
	u32 i;

	for (i = 0; i < tx_ring->ring_size; i++) {
		struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];

		if (!tx_info->skb)
			continue;

		if (print_once) {
			netif_notice(tx_ring->adapter, ifdown, tx_ring->netdev,
				     "Free uncompleted tx skb qid %d idx 0x%x\n",
				     tx_ring->qid, i);
			print_once = false;
		} else {
			netif_dbg(tx_ring->adapter, ifdown, tx_ring->netdev,
				  "Free uncompleted tx skb qid %d idx 0x%x\n",
				  tx_ring->qid, i);
		}

		ena_unmap_tx_buff(tx_ring, tx_info);

		dev_kfree_skb_any(tx_info->skb);
	}
	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
						  tx_ring->qid));
}
static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
{
	struct ena_ring *tx_ring;
	int i;

	for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
		tx_ring = &adapter->tx_ring[i];
		ena_free_tx_bufs(tx_ring);
	}
}
static void ena_destroy_all_tx_queues(struct ena_adapter *adapter)
{
	u16 ena_qid;
	int i;

	for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
		ena_qid = ENA_IO_TXQ_IDX(i);
		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
	}
}

static void ena_destroy_all_rx_queues(struct ena_adapter *adapter)
{
	u16 ena_qid;
	int i;

	for (i = 0; i < adapter->num_io_queues; i++) {
		ena_qid = ENA_IO_RXQ_IDX(i);
		cancel_work_sync(&adapter->ena_napi[i].dim.work);
		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
	}
}

static void ena_destroy_all_io_queues(struct ena_adapter *adapter)
{
	ena_destroy_all_tx_queues(adapter);
	ena_destroy_all_rx_queues(adapter);
}
static int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
				 struct ena_tx_buffer *tx_info, bool is_xdp)
{
	if (tx_info)
		netif_err(ring->adapter,
			  tx_done,
			  ring->netdev,
			  "tx_info doesn't have valid %s. qid %u req_id %u",
			  is_xdp ? "xdp frame" : "skb", ring->qid, req_id);
	else
		netif_err(ring->adapter,
			  tx_done,
			  ring->netdev,
			  "Invalid req_id %u in qid %u\n",
			  req_id, ring->qid);

	ena_increase_stat(&ring->tx_stats.bad_req_id, 1, &ring->syncp);
	ena_reset_device(ring->adapter, ENA_REGS_RESET_INV_TX_REQ_ID);

	return -EFAULT;
}
static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
{
	struct ena_tx_buffer *tx_info;

	tx_info = &tx_ring->tx_buffer_info[req_id];
	if (likely(tx_info->skb))
		return 0;

	return handle_invalid_req_id(tx_ring, req_id, tx_info, false);
}

static int validate_xdp_req_id(struct ena_ring *xdp_ring, u16 req_id)
{
	struct ena_tx_buffer *tx_info;

	tx_info = &xdp_ring->tx_buffer_info[req_id];
	if (likely(tx_info->xdpf))
		return 0;

	return handle_invalid_req_id(xdp_ring, req_id, tx_info, true);
}
static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
{
	struct netdev_queue *txq;
	bool above_thresh;
	u32 tx_bytes = 0;
	u32 total_done = 0;
	u16 next_to_clean;
	u16 req_id;
	int tx_pkts = 0;
	int rc;

	next_to_clean = tx_ring->next_to_clean;
	txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid);

	while (tx_pkts < budget) {
		struct ena_tx_buffer *tx_info;
		struct sk_buff *skb;

		rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
						&req_id);
		if (rc) {
			if (unlikely(rc == -EINVAL))
				handle_invalid_req_id(tx_ring, req_id, NULL,
						      false);
			break;
		}

		/* validate that the request id points to a valid skb */
		rc = validate_tx_req_id(tx_ring, req_id);
		if (rc)
			break;

		tx_info = &tx_ring->tx_buffer_info[req_id];
		skb = tx_info->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		tx_info->skb = NULL;
		tx_info->last_jiffies = 0;

		ena_unmap_tx_buff(tx_ring, tx_info);

		netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
			  "tx_poll: q %d skb %p completed\n", tx_ring->qid,
			  skb);

		tx_bytes += skb->len;
		dev_kfree_skb(skb);
		tx_pkts++;
		total_done += tx_info->tx_descs;

		tx_ring->free_ids[next_to_clean] = req_id;
		next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
						     tx_ring->ring_size);
	}

	tx_ring->next_to_clean = next_to_clean;
	ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
	ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);

	netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);

	netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
		  "tx_poll: q %d done. total pkts: %d\n",
		  tx_ring->qid, tx_pkts);

	/* need to make the rings circular update visible to
	 * ena_start_xmit() before checking for netif_queue_stopped().
	 */
	smp_mb();

	above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
						    ENA_TX_WAKEUP_THRESH);
	if (unlikely(netif_tx_queue_stopped(txq) && above_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		above_thresh =
			ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
						     ENA_TX_WAKEUP_THRESH);
		if (netif_tx_queue_stopped(txq) && above_thresh &&
		    test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) {
			netif_tx_wake_queue(txq);
			ena_increase_stat(&tx_ring->tx_stats.queue_wakeup, 1,
					  &tx_ring->syncp);
		}
		__netif_tx_unlock(txq);
	}

	return tx_pkts;
}
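/* Note: re-checking the stopped queue under __netif_tx_lock() closes the
 * race with ena_start_xmit(): a queue stopped just after the unlocked
 * check would otherwise miss its wakeup until the next completion.
 */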
static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, void *first_frag, u16 len)
{
	struct sk_buff *skb;

	if (!first_frag)
		skb = napi_alloc_skb(rx_ring->napi, len);
	else
		skb = napi_build_skb(first_frag, len);

	if (unlikely(!skb)) {
		ena_increase_stat(&rx_ring->rx_stats.skb_alloc_fail, 1,
				  &rx_ring->syncp);

		netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "Failed to allocate skb. first_frag %s\n",
			  first_frag ? "provided" : "not provided");
	}

	return skb;
}
static bool ena_try_rx_buf_page_reuse(struct ena_rx_buffer *rx_info, u16 buf_len,
				      u16 len, int pkt_offset)
{
	struct ena_com_buf *ena_buf = &rx_info->ena_buf;

	/* More than ENA_MIN_RX_BUF_SIZE left in the reused buffer
	 * for data + headroom + tailroom.
	 */
	if (SKB_DATA_ALIGN(len + pkt_offset) + ENA_MIN_RX_BUF_SIZE <= ena_buf->len) {
		page_ref_inc(rx_info->page);
		rx_info->page_offset += buf_len;
		ena_buf->paddr += buf_len;
		ena_buf->len -= buf_len;
		return true;
	}

	return false;
}
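/* Note: reuse is allowed only if, after this packet, at least
 * ENA_MIN_RX_BUF_SIZE bytes (after alignment) remain in the buffer; the
 * page refcount is bumped so the skb and the RX ring each hold their own
 * reference to the page.
 */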
static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
				  struct ena_com_rx_buf_info *ena_bufs,
				  u32 descs,
				  u16 *next_to_clean)
{
	int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	bool is_xdp_loaded = ena_xdp_present_ring(rx_ring);
	struct ena_rx_buffer *rx_info;
	struct ena_adapter *adapter;
	int page_offset, pkt_offset;
	dma_addr_t pre_reuse_paddr;
	u16 len, req_id, buf = 0;
	bool reuse_rx_buf_page;
	struct sk_buff *skb;
	void *buf_addr;
	int buf_offset;
	u16 buf_len;

	len = ena_bufs[buf].len;
	req_id = ena_bufs[buf].req_id;

	rx_info = &rx_ring->rx_buffer_info[req_id];

	if (unlikely(!rx_info->page)) {
		adapter = rx_ring->adapter;
		netif_err(adapter, rx_err, rx_ring->netdev,
			  "Page is NULL. qid %u req_id %u\n", rx_ring->qid, req_id);
		ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1, &rx_ring->syncp);
		ena_reset_device(adapter, ENA_REGS_RESET_INV_RX_REQ_ID);
		return NULL;
	}

	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "rx_info %p page %p\n",
		  rx_info, rx_info->page);

	buf_offset = rx_info->buf_offset;
	pkt_offset = buf_offset - rx_ring->rx_headroom;
	page_offset = rx_info->page_offset;
	buf_addr = page_address(rx_info->page) + page_offset;

	if (len <= rx_ring->rx_copybreak) {
		skb = ena_alloc_skb(rx_ring, NULL, len);
		if (unlikely(!skb))
			return NULL;

		/* sync this buffer for CPU use */
		dma_sync_single_for_cpu(rx_ring->dev,
					dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset,
					len,
					DMA_FROM_DEVICE);
		skb_copy_to_linear_data(skb, buf_addr + buf_offset, len);
		dma_sync_single_for_device(rx_ring->dev,
					   dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset,
					   len,
					   DMA_FROM_DEVICE);

		skb_put(skb, len);
		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "RX allocated small packet. len %d.\n", skb->len);
		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
		rx_ring->free_ids[*next_to_clean] = req_id;
		*next_to_clean = ENA_RX_RING_IDX_ADD(*next_to_clean, descs,
						     rx_ring->ring_size);
		return skb;
	}

	buf_len = SKB_DATA_ALIGN(len + buf_offset + tailroom);

	pre_reuse_paddr = dma_unmap_addr(&rx_info->ena_buf, paddr);

	/* If XDP isn't loaded try to reuse part of the RX buffer */
	reuse_rx_buf_page = !is_xdp_loaded &&
			    ena_try_rx_buf_page_reuse(rx_info, buf_len, len, pkt_offset);

	dma_sync_single_for_cpu(rx_ring->dev,
				pre_reuse_paddr + pkt_offset,
				len,
				DMA_FROM_DEVICE);

	if (!reuse_rx_buf_page)
		ena_unmap_rx_buff_attrs(rx_ring, rx_info, DMA_ATTR_SKIP_CPU_SYNC);

	skb = ena_alloc_skb(rx_ring, buf_addr, buf_len);
	if (unlikely(!skb))
		return NULL;

	/* Populate skb's linear part */
	skb_reserve(skb, buf_offset);
	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, rx_ring->netdev);

	do {
		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "RX skb updated. len %d. data_len %d\n",
			  skb->len, skb->data_len);

		if (!reuse_rx_buf_page)
			rx_info->page = NULL;

		rx_ring->free_ids[*next_to_clean] = req_id;
		*next_to_clean =
			ENA_RX_RING_IDX_NEXT(*next_to_clean,
					     rx_ring->ring_size);
		if (likely(--descs == 0))
			break;

		buf++;
		len = ena_bufs[buf].len;
		req_id = ena_bufs[buf].req_id;

		rx_info = &rx_ring->rx_buffer_info[req_id];

		/* rx_info->buf_offset includes rx_ring->rx_headroom */
		buf_offset = rx_info->buf_offset;
		pkt_offset = buf_offset - rx_ring->rx_headroom;
		buf_len = SKB_DATA_ALIGN(len + buf_offset + tailroom);
		page_offset = rx_info->page_offset;

		pre_reuse_paddr = dma_unmap_addr(&rx_info->ena_buf, paddr);

		reuse_rx_buf_page = !is_xdp_loaded &&
				    ena_try_rx_buf_page_reuse(rx_info, buf_len, len, pkt_offset);

		dma_sync_single_for_cpu(rx_ring->dev,
					pre_reuse_paddr + pkt_offset,
					len,
					DMA_FROM_DEVICE);

		if (!reuse_rx_buf_page)
			ena_unmap_rx_buff_attrs(rx_ring, rx_info,
						DMA_ATTR_SKIP_CPU_SYNC);

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
				page_offset + buf_offset, len, buf_len);

	} while (1);

	return skb;
}
/* ena_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: structure containing adapter specific data
 * @ena_rx_ctx: received packet context/metadata
 * @skb: skb currently being received and modified
 */
static void ena_rx_checksum(struct ena_ring *rx_ring,
			    struct ena_com_rx_ctx *ena_rx_ctx,
			    struct sk_buff *skb)
{
	/* Rx csum disabled */
	if (unlikely(!(rx_ring->netdev->features & NETIF_F_RXCSUM))) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	/* For fragmented packets the checksum isn't valid */
	if (ena_rx_ctx->frag) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	/* if IP and error */
	if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) &&
		     (ena_rx_ctx->l3_csum_err))) {
		/* ipv4 checksum error */
		skb->ip_summed = CHECKSUM_NONE;
		ena_increase_stat(&rx_ring->rx_stats.csum_bad, 1,
				  &rx_ring->syncp);
		netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "RX IPv4 header checksum error\n");
		return;
	}

	/* if TCP/UDP */
	if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
		   (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) {
		if (unlikely(ena_rx_ctx->l4_csum_err)) {
			/* TCP/UDP checksum error */
			ena_increase_stat(&rx_ring->rx_stats.csum_bad, 1,
					  &rx_ring->syncp);
			netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
				  "RX L4 checksum error\n");
			skb->ip_summed = CHECKSUM_NONE;
			return;
		}

		if (likely(ena_rx_ctx->l4_csum_checked)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			ena_increase_stat(&rx_ring->rx_stats.csum_good, 1,
					  &rx_ring->syncp);
		} else {
			ena_increase_stat(&rx_ring->rx_stats.csum_unchecked, 1,
					  &rx_ring->syncp);
			skb->ip_summed = CHECKSUM_NONE;
		}
	} else {
		skb->ip_summed = CHECKSUM_NONE;
	}
}
static void ena_set_rx_hash(struct ena_ring *rx_ring,
			    struct ena_com_rx_ctx *ena_rx_ctx,
			    struct sk_buff *skb)
{
	enum pkt_hash_types hash_type;

	if (likely(rx_ring->netdev->features & NETIF_F_RXHASH)) {
		if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
			   (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)))
			hash_type = PKT_HASH_TYPE_L4;
		else
			hash_type = PKT_HASH_TYPE_NONE;

		/* Override hash type if the packet is fragmented */
		if (ena_rx_ctx->frag)
			hash_type = PKT_HASH_TYPE_NONE;

		skb_set_hash(skb, ena_rx_ctx->hash, hash_type);
	}
}
static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
{
	struct ena_rx_buffer *rx_info;
	int ret;

	rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
	xdp_prepare_buff(xdp, page_address(rx_info->page),
			 rx_info->buf_offset,
			 rx_ring->ena_bufs[0].len, false);
	/* If for some reason we received a bigger packet than
	 * we expect, then we simply drop it
	 */
	if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU))
		return ENA_XDP_DROP;

	ret = ena_xdp_execute(rx_ring, xdp);

	/* The xdp program might expand the headers */
	if (ret == ENA_XDP_PASS) {
		rx_info->buf_offset = xdp->data - xdp->data_hard_start;
		rx_ring->ena_bufs[0].len = xdp->data_end - xdp->data;
	}

	return ret;
}
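/* Note: on ENA_XDP_PASS the driver re-derives buf_offset and the buffer
 * length from the xdp_buff, because the program may have moved xdp->data
 * (e.g. via bpf_xdp_adjust_head()) before letting the packet through.
 */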
/* ena_clean_rx_irq - Cleanup RX irq
 * @rx_ring: RX ring to clean
 * @napi: napi handler
 * @budget: how many packets driver is allowed to clean
 *
 * Returns the number of cleaned buffers.
 */
static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
			    u32 budget)
{
	u16 next_to_clean = rx_ring->next_to_clean;
	struct ena_com_rx_ctx ena_rx_ctx;
	struct ena_rx_buffer *rx_info;
	struct ena_adapter *adapter;
	u32 res_budget, work_done;
	int rx_copybreak_pkt = 0;
	int refill_threshold;
	struct sk_buff *skb;
	int refill_required;
	struct xdp_buff xdp;
	int xdp_flags = 0;
	int total_len = 0;
	int xdp_verdict;
	int rc = 0;
	int i;

	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "%s qid %d\n", __func__, rx_ring->qid);
	res_budget = budget;
	xdp_init_buff(&xdp, ENA_PAGE_SIZE, &rx_ring->xdp_rxq);

	do {
		xdp_verdict = ENA_XDP_PASS;
		skb = NULL;
		ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
		ena_rx_ctx.max_bufs = rx_ring->sgl_size;
		ena_rx_ctx.descs = 0;
		ena_rx_ctx.pkt_offset = 0;
		rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
				    rx_ring->ena_com_io_sq,
				    &ena_rx_ctx);
		if (unlikely(rc))
			goto error;

		if (unlikely(ena_rx_ctx.descs == 0))
			break;

		/* First descriptor might have an offset set by the device */
		rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
		rx_info->buf_offset += ena_rx_ctx.pkt_offset;

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
			  rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
			  ena_rx_ctx.l4_proto, ena_rx_ctx.hash);

		if (ena_xdp_present_ring(rx_ring))
			xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp);

		/* allocate skb and fill it */
		if (xdp_verdict == ENA_XDP_PASS)
			skb = ena_rx_skb(rx_ring,
					 rx_ring->ena_bufs,
					 ena_rx_ctx.descs,
					 &next_to_clean);

		if (unlikely(!skb)) {
			for (i = 0; i < ena_rx_ctx.descs; i++) {
				int req_id = rx_ring->ena_bufs[i].req_id;

				rx_ring->free_ids[next_to_clean] = req_id;
				next_to_clean =
					ENA_RX_RING_IDX_NEXT(next_to_clean,
							     rx_ring->ring_size);

				/* Packet was passed for transmission, unmap it
				 * from RX side.
				 */
				if (xdp_verdict & ENA_XDP_FORWARDED) {
					ena_unmap_rx_buff_attrs(rx_ring,
								&rx_ring->rx_buffer_info[req_id],
								DMA_ATTR_SKIP_CPU_SYNC);
					rx_ring->rx_buffer_info[req_id].page = NULL;
				}
			}
			if (xdp_verdict != ENA_XDP_PASS) {
				xdp_flags |= xdp_verdict;
				total_len += ena_rx_ctx.ena_bufs[0].len;
				res_budget--;
				continue;
			}
			break;
		}

		ena_rx_checksum(rx_ring, &ena_rx_ctx, skb);

		ena_set_rx_hash(rx_ring, &ena_rx_ctx, skb);

		skb_record_rx_queue(skb, rx_ring->qid);

		if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak)
			rx_copybreak_pkt++;

		total_len += skb->len;

		napi_gro_receive(napi, skb);

		res_budget--;
	} while (likely(res_budget));

	work_done = budget - res_budget;
	rx_ring->per_napi_packets += work_done;
	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->rx_stats.bytes += total_len;
	rx_ring->rx_stats.cnt += work_done;
	rx_ring->rx_stats.rx_copybreak_pkt += rx_copybreak_pkt;
	u64_stats_update_end(&rx_ring->syncp);

	rx_ring->next_to_clean = next_to_clean;

	refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
	refill_threshold =
		min_t(int, rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER,
		      ENA_RX_REFILL_THRESH_PACKET);

	/* Optimization, try to batch new rx buffers */
	if (refill_required > refill_threshold) {
		ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
		ena_refill_rx_bufs(rx_ring, refill_required);
	}

	if (xdp_flags & ENA_XDP_REDIRECT)
		xdp_do_flush();

	return work_done;

error:
	if (xdp_flags & ENA_XDP_REDIRECT)
		xdp_do_flush();

	adapter = netdev_priv(rx_ring->netdev);

	if (rc == -ENOSPC) {
		ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1,
				  &rx_ring->syncp);
		ena_reset_device(adapter, ENA_REGS_RESET_TOO_MANY_RX_DESCS);
	} else {
		ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1,
				  &rx_ring->syncp);
		ena_reset_device(adapter, ENA_REGS_RESET_INV_RX_REQ_ID);
	}
	return 0;
}
static void ena_dim_work(struct work_struct *w)
{
	struct dim *dim = container_of(w, struct dim, work);
	struct dim_cq_moder cur_moder =
		net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
	struct ena_napi *ena_napi = container_of(dim, struct ena_napi, dim);

	ena_napi->rx_ring->smoothed_interval = cur_moder.usec;
	dim->state = DIM_START_MEASURE;
}
static void ena_adjust_adaptive_rx_intr_moderation(struct ena_napi *ena_napi)
{
	struct dim_sample dim_sample;
	struct ena_ring *rx_ring = ena_napi->rx_ring;

	if (!rx_ring->per_napi_packets)
		return;

	rx_ring->non_empty_napi_events++;

	dim_update_sample(rx_ring->non_empty_napi_events,
			  rx_ring->rx_stats.cnt,
			  rx_ring->rx_stats.bytes,
			  &dim_sample);

	net_dim(&ena_napi->dim, dim_sample);

	rx_ring->per_napi_packets = 0;
}
static void ena_unmask_interrupt(struct ena_ring *tx_ring,
				 struct ena_ring *rx_ring)
{
	u32 rx_interval = tx_ring->smoothed_interval;
	struct ena_eth_io_intr_reg intr_reg;

	/* Rx ring can be NULL for XDP tx queues which don't have an
	 * accompanying rx_ring pair.
	 */
	if (rx_ring)
		rx_interval = ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev) ?
			rx_ring->smoothed_interval :
			ena_com_get_nonadaptive_moderation_interval_rx(rx_ring->ena_dev);

	/* Update intr register: rx intr delay,
	 * tx intr delay and interrupt unmask
	 */
	ena_com_update_intr_reg(&intr_reg,
				rx_interval,
				tx_ring->smoothed_interval,
				true);

	ena_increase_stat(&tx_ring->tx_stats.unmask_interrupt, 1,
			  &tx_ring->syncp);

	/* It is a shared MSI-X.
	 * Tx and Rx CQ have pointer to it.
	 * So we use one of them to reach the intr reg
	 * The Tx ring is used because the rx_ring is NULL for XDP queues
	 */
	ena_com_unmask_intr(tx_ring->ena_com_io_cq, &intr_reg);
}
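/* Note: one MSI-X vector (and thus one intr_reg) serves both rings of a
 * channel, which is why the Tx CQ pointer is used for the unmask even
 * for XDP queues that have no rx_ring.
 */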
static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
				      struct ena_ring *rx_ring)
{
	int cpu = get_cpu();
	int numa_node;

	/* Check only one ring since the 2 rings are running on the same cpu */
	if (likely(tx_ring->cpu == cpu))
		goto out;

	tx_ring->cpu = cpu;
	if (rx_ring)
		rx_ring->cpu = cpu;

	numa_node = cpu_to_node(cpu);

	if (likely(tx_ring->numa_node == numa_node))
		goto out;

	if (numa_node != NUMA_NO_NODE) {
		ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node);
		tx_ring->numa_node = numa_node;
		if (rx_ring) {
			rx_ring->numa_node = numa_node;
			ena_com_update_numa_node(rx_ring->ena_com_io_cq,
						 numa_node);
		}
	}

out:
	put_cpu();
}
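/* Note: get_cpu()/put_cpu() disable preemption while the rings' recorded
 * cpu and NUMA node are compared and updated, so the task cannot migrate
 * in the middle of the update.
 */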
static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget)
{
	u32 total_done = 0;
	u16 next_to_clean;
	int tx_pkts = 0;
	u16 req_id;
	int rc;

	if (unlikely(!xdp_ring))
		return 0;
	next_to_clean = xdp_ring->next_to_clean;

	while (tx_pkts < budget) {
		struct ena_tx_buffer *tx_info;
		struct xdp_frame *xdpf;

		rc = ena_com_tx_comp_req_id_get(xdp_ring->ena_com_io_cq,
						&req_id);
		if (rc) {
			if (unlikely(rc == -EINVAL))
				handle_invalid_req_id(xdp_ring, req_id, NULL,
						      true);
			break;
		}

		/* validate that the request id points to a valid xdp_frame */
		rc = validate_xdp_req_id(xdp_ring, req_id);
		if (rc)
			break;

		tx_info = &xdp_ring->tx_buffer_info[req_id];
		xdpf = tx_info->xdpf;

		tx_info->xdpf = NULL;
		tx_info->last_jiffies = 0;
		ena_unmap_tx_buff(xdp_ring, tx_info);

		netif_dbg(xdp_ring->adapter, tx_done, xdp_ring->netdev,
			  "tx_poll: q %d skb %p completed\n", xdp_ring->qid,
			  xdpf);

		tx_pkts++;
		total_done += tx_info->tx_descs;

		xdp_return_frame(xdpf);
		xdp_ring->free_ids[next_to_clean] = req_id;
		next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
						     xdp_ring->ring_size);
	}

	xdp_ring->next_to_clean = next_to_clean;
	ena_com_comp_ack(xdp_ring->ena_com_io_sq, total_done);
	ena_com_update_dev_comp_head(xdp_ring->ena_com_io_cq);

	netif_dbg(xdp_ring->adapter, tx_done, xdp_ring->netdev,
		  "tx_poll: q %d done. total pkts: %d\n",
		  xdp_ring->qid, tx_pkts);

	return tx_pkts;
}
static int ena_io_poll(struct napi_struct *napi, int budget)
{
	struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
	struct ena_ring *tx_ring, *rx_ring;
	int tx_work_done;
	int rx_work_done = 0;
	int tx_budget;
	int napi_comp_call = 0;
	int ret;

	tx_ring = ena_napi->tx_ring;
	rx_ring = ena_napi->rx_ring;

	tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER;

	if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
	    test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) {
		napi_complete_done(napi, 0);
		return 0;
	}

	tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget);
	/* On netpoll the budget is zero and the handler should only clean the
	 * tx completions.
	 */
	if (likely(budget))
		rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);

	/* If the device is about to reset or down, avoid unmask
	 * the interrupt and return 0 so NAPI won't reschedule
	 */
	if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
		     test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags))) {
		napi_complete_done(napi, 0);
		ret = 0;

	} else if ((budget > rx_work_done) && (tx_budget > tx_work_done)) {
		napi_comp_call = 1;

		/* Update numa and unmask the interrupt only when schedule
		 * from the interrupt context (vs from sk_busy_loop)
		 */
		if (napi_complete_done(napi, rx_work_done) &&
		    READ_ONCE(ena_napi->interrupts_masked)) {
			smp_rmb(); /* make sure interrupts_masked is read */
			WRITE_ONCE(ena_napi->interrupts_masked, false);
			/* We apply adaptive moderation on Rx path only.
			 * Tx uses static interrupt moderation.
			 */
			if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
				ena_adjust_adaptive_rx_intr_moderation(ena_napi);

			ena_update_ring_numa_node(tx_ring, rx_ring);
			ena_unmask_interrupt(tx_ring, rx_ring);
		}

		ret = rx_work_done;
	} else {
		ret = budget;
	}

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.napi_comp += napi_comp_call;
	tx_ring->tx_stats.tx_poll++;
	u64_stats_update_end(&tx_ring->syncp);

	tx_ring->tx_stats.last_napi_jiffies = jiffies;

	return ret;
}
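/* Note: interrupts_masked is set by the IRQ handler under smp_wmb() and
 * read here after napi_complete_done() with the matching smp_rmb(), so
 * the unmask only happens for polls scheduled from hard-IRQ context and
 * not for busy-poll invocations.
 */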
static irqreturn_t ena_intr_msix_mgmnt(int irq, void *data)
{
	struct ena_adapter *adapter = (struct ena_adapter *)data;

	ena_com_admin_q_comp_intr_handler(adapter->ena_dev);

	/* Don't call the aenq handler before probe is done */
	if (likely(test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)))
		ena_com_aenq_intr_handler(adapter->ena_dev, data);

	return IRQ_HANDLED;
}
/* ena_intr_msix_io - MSI-X Interrupt Handler for Tx/Rx
 * @irq: interrupt number
 * @data: pointer to a network interface private napi device structure
 */
static irqreturn_t ena_intr_msix_io(int irq, void *data)
{
	struct ena_napi *ena_napi = data;

	/* Used to check HW health */
	WRITE_ONCE(ena_napi->first_interrupt, true);

	WRITE_ONCE(ena_napi->interrupts_masked, true);
	smp_wmb(); /* write interrupts_masked before calling napi */

	napi_schedule_irqoff(&ena_napi->napi);

	return IRQ_HANDLED;
}
/* Reserve a single MSI-X vector for management (admin + aenq).
 * plus reserve one vector for each potential io queue.
 * the number of potential io queues is the minimum of what the device
 * supports and the number of vCPUs.
 */
static int ena_enable_msix(struct ena_adapter *adapter)
{
	int msix_vecs, irq_cnt;

	if (test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
		netif_err(adapter, probe, adapter->netdev,
			  "Error, MSI-X is already enabled\n");
		return -EPERM;
	}

	/* Reserved the max msix vectors we might need */
	msix_vecs = ENA_MAX_MSIX_VEC(adapter->max_num_io_queues);
	netif_dbg(adapter, probe, adapter->netdev,
		  "Trying to enable MSI-X, vectors %d\n", msix_vecs);

	irq_cnt = pci_alloc_irq_vectors(adapter->pdev, ENA_MIN_MSIX_VEC,
					msix_vecs, PCI_IRQ_MSIX);

	if (irq_cnt < 0) {
		netif_err(adapter, probe, adapter->netdev,
			  "Failed to enable MSI-X. irq_cnt %d\n", irq_cnt);
		return -ENOSPC;
	}

	if (irq_cnt != msix_vecs) {
		netif_notice(adapter, probe, adapter->netdev,
			     "Enable only %d MSI-X (out of %d), reduce the number of queues\n",
			     irq_cnt, msix_vecs);
		adapter->num_io_queues = irq_cnt - ENA_ADMIN_MSIX_VEC;
	}

	if (ena_init_rx_cpu_rmap(adapter))
		netif_warn(adapter, probe, adapter->netdev,
			   "Failed to map IRQs to CPUs\n");

	adapter->msix_vecs = irq_cnt;
	set_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags);

	return 0;
}
static void ena_setup_mgmnt_intr(struct ena_adapter *adapter)
{
	u32 cpu;

	snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name,
		 ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s",
		 pci_name(adapter->pdev));
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler =
		ena_intr_msix_mgmnt;
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter;
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector =
		pci_irq_vector(adapter->pdev, ENA_MGMNT_IRQ_IDX);
	cpu = cpumask_first(cpu_online_mask);
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].cpu = cpu;
	cpumask_set_cpu(cpu,
			&adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].affinity_hint_mask);
}
static void ena_setup_io_intr(struct ena_adapter *adapter)
{
	struct net_device *netdev;
	int irq_idx, i, cpu;
	int io_queue_count;

	netdev = adapter->netdev;
	io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;

	for (i = 0; i < io_queue_count; i++) {
		irq_idx = ENA_IO_IRQ_IDX(i);
		cpu = i % num_online_cpus();

		snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE,
			 "%s-Tx-Rx-%d", netdev->name, i);
		adapter->irq_tbl[irq_idx].handler = ena_intr_msix_io;
		adapter->irq_tbl[irq_idx].data = &adapter->ena_napi[i];
		adapter->irq_tbl[irq_idx].vector =
			pci_irq_vector(adapter->pdev, irq_idx);
		adapter->irq_tbl[irq_idx].cpu = cpu;

		cpumask_set_cpu(cpu,
				&adapter->irq_tbl[irq_idx].affinity_hint_mask);
	}
}
static int ena_request_mgmnt_irq(struct ena_adapter *adapter)
{
	unsigned long flags = 0;
	struct ena_irq *irq;
	int rc;

	irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
	rc = request_irq(irq->vector, irq->handler, flags, irq->name,
			 irq->data);
	if (rc) {
		netif_err(adapter, probe, adapter->netdev,
			  "Failed to request admin irq\n");
		return rc;
	}

	netif_dbg(adapter, probe, adapter->netdev,
		  "Set affinity hint of mgmnt irq.to 0x%lx (irq vector: %d)\n",
		  irq->affinity_hint_mask.bits[0], irq->vector);

	irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);

	return rc;
}
static int ena_request_io_irq(struct ena_adapter *adapter)
{
	u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
	unsigned long flags = 0;
	struct ena_irq *irq;
	int rc = 0, i, k;

	if (!test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to request I/O IRQ: MSI-X is not enabled\n");
		return -EINVAL;
	}

	for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) {
		irq = &adapter->irq_tbl[i];
		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
				 irq->data);
		if (rc) {
			netif_err(adapter, ifup, adapter->netdev,
				  "Failed to request I/O IRQ. index %d rc %d\n",
				  i, rc);
			goto err;
		}

		netif_dbg(adapter, ifup, adapter->netdev,
			  "Set affinity hint of irq. index %d to 0x%lx (irq vector: %d)\n",
			  i, irq->affinity_hint_mask.bits[0], irq->vector);

		irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
	}

	return rc;

err:
	for (k = ENA_IO_IRQ_FIRST_IDX; k < i; k++) {
		irq = &adapter->irq_tbl[k];
		free_irq(irq->vector, irq->data);
	}

	return rc;
}
static void ena_free_mgmnt_irq(struct ena_adapter *adapter)
{
	struct ena_irq *irq;

	irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
	synchronize_irq(irq->vector);
	irq_set_affinity_hint(irq->vector, NULL);
	free_irq(irq->vector, irq->data);
}
static void ena_free_io_irq(struct ena_adapter *adapter)
{
	u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
	struct ena_irq *irq;
	int i;

#ifdef CONFIG_RFS_ACCEL
	if (adapter->msix_vecs >= 1) {
		free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
		adapter->netdev->rx_cpu_rmap = NULL;
	}
#endif /* CONFIG_RFS_ACCEL */

	for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) {
		irq = &adapter->irq_tbl[i];
		irq_set_affinity_hint(irq->vector, NULL);
		free_irq(irq->vector, irq->data);
	}
}
static void ena_disable_msix(struct ena_adapter *adapter)
{
	if (test_and_clear_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags))
		pci_free_irq_vectors(adapter->pdev);
}
static void ena_disable_io_intr_sync(struct ena_adapter *adapter)
{
	u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
	int i;

	if (!netif_running(adapter->netdev))
		return;

	for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++)
		synchronize_irq(adapter->irq_tbl[i].vector);
}
static void ena_del_napi_in_range(struct ena_adapter *adapter,
				  int first_index,
				  int count)
{
	int i;

	for (i = first_index; i < first_index + count; i++) {
		netif_napi_del(&adapter->ena_napi[i].napi);

		WARN_ON(!ENA_IS_XDP_INDEX(adapter, i) &&
			adapter->ena_napi[i].xdp_ring);
	}
}
static void ena_init_napi_in_range(struct ena_adapter *adapter,
				   int first_index, int count)
{
	int i;

	for (i = first_index; i < first_index + count; i++) {
		struct ena_napi *napi = &adapter->ena_napi[i];

		netif_napi_add(adapter->netdev, &napi->napi,
			       ENA_IS_XDP_INDEX(adapter, i) ? ena_xdp_io_poll : ena_io_poll);

		if (!ENA_IS_XDP_INDEX(adapter, i)) {
			napi->rx_ring = &adapter->rx_ring[i];
			napi->tx_ring = &adapter->tx_ring[i];
		} else {
			napi->xdp_ring = &adapter->tx_ring[i];
		}
		napi->qid = i;
	}
}
static void ena_napi_disable_in_range(struct ena_adapter *adapter,
				      int first_index,
				      int count)
{
	int i;

	for (i = first_index; i < first_index + count; i++)
		napi_disable(&adapter->ena_napi[i].napi);
}
static void ena_napi_enable_in_range(struct ena_adapter *adapter,
				     int first_index,
				     int count)
{
	int i;

	for (i = first_index; i < first_index + count; i++)
		napi_enable(&adapter->ena_napi[i].napi);
}
/* Configure the Rx forwarding */
static int ena_rss_configure(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int rc;

	/* In case the RSS table wasn't initialized by probe */
	if (!ena_dev->rss.tbl_log_size) {
		rc = ena_rss_init_default(adapter);
		if (rc && (rc != -EOPNOTSUPP)) {
			netif_err(adapter, ifup, adapter->netdev,
				  "Failed to init RSS rc: %d\n", rc);
			return rc;
		}
	}

	/* Set indirect table */
	rc = ena_com_indirect_table_set(ena_dev);
	if (unlikely(rc && rc != -EOPNOTSUPP))
		return rc;

	/* Configure hash function (if supported) */
	rc = ena_com_set_hash_function(ena_dev);
	if (unlikely(rc && (rc != -EOPNOTSUPP)))
		return rc;

	/* Configure hash inputs (if supported) */
	rc = ena_com_set_hash_ctrl(ena_dev);
	if (unlikely(rc && (rc != -EOPNOTSUPP)))
		return rc;

	return 0;
}
static int ena_up_complete(struct ena_adapter *adapter)
{
	int rc;

	rc = ena_rss_configure(adapter);
	if (rc)
		return rc;

	ena_change_mtu(adapter->netdev, adapter->netdev->mtu);

	ena_refill_all_rx_bufs(adapter);

	/* enable transmits */
	netif_tx_start_all_queues(adapter->netdev);

	ena_napi_enable_in_range(adapter,
				 0,
				 adapter->xdp_num_queues + adapter->num_io_queues);

	return 0;
}
static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
{
	struct ena_com_create_io_ctx ctx;
	struct ena_com_dev *ena_dev;
	struct ena_ring *tx_ring;
	u32 msix_vector;
	u16 ena_qid;
	int rc;

	ena_dev = adapter->ena_dev;

	tx_ring = &adapter->tx_ring[qid];
	msix_vector = ENA_IO_IRQ_IDX(qid);
	ena_qid = ENA_IO_TXQ_IDX(qid);

	memset(&ctx, 0x0, sizeof(ctx));

	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
	ctx.qid = ena_qid;
	ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
	ctx.msix_vector = msix_vector;
	ctx.queue_size = tx_ring->ring_size;
	ctx.numa_node = tx_ring->numa_node;

	rc = ena_com_create_io_queue(ena_dev, &ctx);
	if (rc) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to create I/O TX queue num %d rc: %d\n",
			  qid, rc);
		return rc;
	}

	rc = ena_com_get_io_handlers(ena_dev, ena_qid,
				     &tx_ring->ena_com_io_sq,
				     &tx_ring->ena_com_io_cq);
	if (rc) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to get TX queue handlers. TX queue num %d rc: %d\n",
			  qid, rc);
		ena_com_destroy_io_queue(ena_dev, ena_qid);
		return rc;
	}

	ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node);

	return rc;
}
static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
					    int first_index, int count)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int rc, i;

	for (i = first_index; i < first_index + count; i++) {
		rc = ena_create_io_tx_queue(adapter, i);
		if (rc)
			goto create_err;
	}

	return 0;

create_err:
	while (i-- > first_index)
		ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i));

	return rc;
}
static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
{
	struct ena_com_dev *ena_dev;
	struct ena_com_create_io_ctx ctx;
	struct ena_ring *rx_ring;
	u32 msix_vector;
	u16 ena_qid;
	int rc;

	ena_dev = adapter->ena_dev;

	rx_ring = &adapter->rx_ring[qid];
	msix_vector = ENA_IO_IRQ_IDX(qid);
	ena_qid = ENA_IO_RXQ_IDX(qid);

	memset(&ctx, 0x0, sizeof(ctx));

	ctx.qid = ena_qid;
	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
	ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
	ctx.msix_vector = msix_vector;
	ctx.queue_size = rx_ring->ring_size;
	ctx.numa_node = rx_ring->numa_node;

	rc = ena_com_create_io_queue(ena_dev, &ctx);
	if (rc) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to create I/O RX queue num %d rc: %d\n",
			  qid, rc);
		return rc;
	}

	rc = ena_com_get_io_handlers(ena_dev, ena_qid,
				     &rx_ring->ena_com_io_sq,
				     &rx_ring->ena_com_io_cq);
	if (rc) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to get RX queue handlers. RX queue num %d rc: %d\n",
			  qid, rc);
		goto err;
	}

	ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node);

	return rc;
err:
	ena_com_destroy_io_queue(ena_dev, ena_qid);
	return rc;
}
static int ena_create_all_io_rx_queues(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int rc, i;

	for (i = 0; i < adapter->num_io_queues; i++) {
		rc = ena_create_io_rx_queue(adapter, i);
		if (rc)
			goto create_err;
		INIT_WORK(&adapter->ena_napi[i].dim.work, ena_dim_work);
	}

	return 0;

create_err:
	while (i--) {
		cancel_work_sync(&adapter->ena_napi[i].dim.work);
		ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));
	}

	return rc;
}
static void set_io_rings_size(struct ena_adapter *adapter,
			      int new_tx_size,
			      int new_rx_size)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++) {
		adapter->tx_ring[i].ring_size = new_tx_size;
		adapter->rx_ring[i].ring_size = new_rx_size;
	}
}
/* This function allows queue allocation to backoff when the system is
 * low on memory. If there is not enough memory to allocate io queues
 * the driver will try to allocate smaller queues.
 *
 * The backoff algorithm is as follows:
 *  1. Try to allocate TX and RX. If successful, return success.
 *  2. Divide by 2 the size of the larger of RX and TX queues (or both if
 *     their size is the same).
 *  3. If TX or RX is smaller than 256, return failure.
 *  4. Otherwise, go back to 1.
 */
static int create_queues_with_size_backoff(struct ena_adapter *adapter)
{
	int rc, cur_rx_ring_size, cur_tx_ring_size;
	int new_rx_ring_size, new_tx_ring_size;

	/* current queue sizes might be set to smaller than the requested
	 * ones due to past queue allocation failures.
	 */
	set_io_rings_size(adapter, adapter->requested_tx_ring_size,
			  adapter->requested_rx_ring_size);

	while (1) {
		if (ena_xdp_present(adapter)) {
			rc = ena_setup_and_create_all_xdp_queues(adapter);
			if (rc)
				goto err_setup_tx;
		}
		rc = ena_setup_tx_resources_in_range(adapter,
						     0,
						     adapter->num_io_queues);
		if (rc)
			goto err_setup_tx;

		rc = ena_create_io_tx_queues_in_range(adapter,
						      0,
						      adapter->num_io_queues);
		if (rc)
			goto err_create_tx_queues;

		rc = ena_setup_all_rx_resources(adapter);
		if (rc)
			goto err_setup_rx;

		rc = ena_create_all_io_rx_queues(adapter);
		if (rc)
			goto err_create_rx_queues;

		return 0;

err_create_rx_queues:
		ena_free_all_io_rx_resources(adapter);
err_setup_rx:
		ena_destroy_all_tx_queues(adapter);
err_create_tx_queues:
		ena_free_all_io_tx_resources(adapter);
err_setup_tx:
		if (rc != -ENOMEM) {
			netif_err(adapter, ifup, adapter->netdev,
				  "Queue creation failed with error code %d\n",
				  rc);
			return rc;
		}

		cur_tx_ring_size = adapter->tx_ring[0].ring_size;
		cur_rx_ring_size = adapter->rx_ring[0].ring_size;

		netif_err(adapter, ifup, adapter->netdev,
			  "Not enough memory to create queues with sizes TX=%d, RX=%d\n",
			  cur_tx_ring_size, cur_rx_ring_size);

		new_tx_ring_size = cur_tx_ring_size;
		new_rx_ring_size = cur_rx_ring_size;

		/* Decrease the size of the larger queue, or
		 * decrease both if they are the same size.
		 */
		if (cur_rx_ring_size <= cur_tx_ring_size)
			new_tx_ring_size = cur_tx_ring_size / 2;
		if (cur_rx_ring_size >= cur_tx_ring_size)
			new_rx_ring_size = cur_rx_ring_size / 2;

		if (new_tx_ring_size < ENA_MIN_RING_SIZE ||
		    new_rx_ring_size < ENA_MIN_RING_SIZE) {
			netif_err(adapter, ifup, adapter->netdev,
				  "Queue creation failed with the smallest possible queue size of %d for both queues. Not retrying with smaller queues\n",
				  ENA_MIN_RING_SIZE);
			return rc;
		}

		netif_err(adapter, ifup, adapter->netdev,
			  "Retrying queue creation with sizes TX=%d, RX=%d\n",
			  new_tx_ring_size,
			  new_rx_ring_size);

		set_io_rings_size(adapter, new_tx_ring_size,
				  new_rx_ring_size);
	}
}
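/* Backoff example: with requested sizes TX=1024/RX=256 and repeated
 * -ENOMEM, the retry sequence is TX=512/RX=256, then TX=256/RX=256; the
 * next halving (128/128) would drop below ENA_MIN_RING_SIZE, so at that
 * point the last -ENOMEM is returned instead of retrying again.
 */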
static int ena_up(struct ena_adapter *adapter)
{
	int io_queue_count, rc, i;

	netif_dbg(adapter, ifup, adapter->netdev, "%s\n", __func__);

	io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
	ena_setup_io_intr(adapter);

	/* napi poll functions should be initialized before running
	 * request_irq(), to handle a rare condition where there is a pending
	 * interrupt, causing the ISR to fire immediately while the poll
	 * function wasn't set yet, causing a null dereference
	 */
	ena_init_napi_in_range(adapter, 0, io_queue_count);

	rc = ena_request_io_irq(adapter);
	if (rc)
		goto err_req_irq;

	rc = create_queues_with_size_backoff(adapter);
	if (rc)
		goto err_create_queues_with_backoff;

	rc = ena_up_complete(adapter);
	if (rc)
		goto err_up;

	if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
		netif_carrier_on(adapter->netdev);

	ena_increase_stat(&adapter->dev_stats.interface_up, 1,
			  &adapter->syncp);

	set_bit(ENA_FLAG_DEV_UP, &adapter->flags);

	/* Enable completion queues interrupt */
	for (i = 0; i < adapter->num_io_queues; i++)
		ena_unmask_interrupt(&adapter->tx_ring[i],
				     &adapter->rx_ring[i]);

	/* schedule napi in case we had pending packets
	 * from the last time we disabled napi
	 */
	for (i = 0; i < io_queue_count; i++)
		napi_schedule(&adapter->ena_napi[i].napi);

	return rc;

err_up:
	ena_destroy_all_tx_queues(adapter);
	ena_free_all_io_tx_resources(adapter);
	ena_destroy_all_rx_queues(adapter);
	ena_free_all_io_rx_resources(adapter);
err_create_queues_with_backoff:
	ena_free_io_irq(adapter);
err_req_irq:
	ena_del_napi_in_range(adapter, 0, io_queue_count);

	return rc;
}
static void ena_down(struct ena_adapter *adapter)
{
	int io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;

	netif_info(adapter, ifdown, adapter->netdev, "%s\n", __func__);

	clear_bit(ENA_FLAG_DEV_UP, &adapter->flags);

	ena_increase_stat(&adapter->dev_stats.interface_down, 1,
			  &adapter->syncp);

	netif_carrier_off(adapter->netdev);
	netif_tx_disable(adapter->netdev);

	/* After this point the napi handler won't enable the tx queue */
	ena_napi_disable_in_range(adapter, 0, io_queue_count);

	/* After destroying the queues there won't be any new interrupts */

	if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) {
		int rc;

		rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
		if (rc)
			netif_err(adapter, ifdown, adapter->netdev,
				  "Device reset failed\n");
		/* stop submitting admin commands on a device that was reset */
		ena_com_set_admin_running_state(adapter->ena_dev, false);
	}

	ena_destroy_all_io_queues(adapter);

	ena_disable_io_intr_sync(adapter);
	ena_free_io_irq(adapter);
	ena_del_napi_in_range(adapter, 0, io_queue_count);

	ena_free_all_tx_bufs(adapter);
	ena_free_all_rx_bufs(adapter);
	ena_free_all_io_tx_resources(adapter);
	ena_free_all_io_rx_resources(adapter);
}
/* ena_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 */
static int ena_open(struct net_device *netdev)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	int rc;

	/* Notify the stack of the actual queue counts. */
	rc = netif_set_real_num_tx_queues(netdev, adapter->num_io_queues);
	if (rc) {
		netif_err(adapter, ifup, netdev, "Can't set num tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, adapter->num_io_queues);
	if (rc) {
		netif_err(adapter, ifup, netdev, "Can't set num rx queues\n");
		return rc;
	}

	rc = ena_up(adapter);
	if (rc)
		return rc;

	return rc;
}
/* ena_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 */
static int ena_close(struct net_device *netdev)
{
	struct ena_adapter *adapter = netdev_priv(netdev);

	netif_dbg(adapter, ifdown, netdev, "%s\n", __func__);

	if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
		return 0;

	if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
		ena_down(adapter);

	/* Check for device status and issue reset if needed */
	check_for_admin_com_state(adapter);
	if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
		netif_err(adapter, ifdown, adapter->netdev,
			  "Destroy failure, restarting device\n");
		ena_dump_stats_to_dmesg(adapter);
		/* rtnl lock already obtained in dev_ioctl() layer */
		ena_destroy_device(adapter, false);
		ena_restore_device(adapter);
	}

	return 0;
}
int ena_update_queue_params(struct ena_adapter *adapter,
			    u32 new_tx_size,
			    u32 new_rx_size,
			    u32 new_llq_header_len)
{
	bool dev_was_up, large_llq_changed = false;
	int rc = 0;

	dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
	ena_close(adapter->netdev);
	adapter->requested_tx_ring_size = new_tx_size;
	adapter->requested_rx_ring_size = new_rx_size;
	ena_init_io_rings(adapter,
			  0,
			  adapter->xdp_num_queues +
			  adapter->num_io_queues);

	large_llq_changed = adapter->ena_dev->tx_mem_queue_type ==
			    ENA_ADMIN_PLACEMENT_POLICY_DEV;
	large_llq_changed &=
		new_llq_header_len != adapter->ena_dev->tx_max_header_size;

	/* A check that the configuration is valid is done by the caller */
	if (large_llq_changed) {
		adapter->large_llq_header_enabled = !adapter->large_llq_header_enabled;

		ena_destroy_device(adapter, false);
		rc = ena_restore_device(adapter);
	}

	return dev_was_up && !rc ? ena_up(adapter) : rc;
}
int ena_set_rx_copybreak(struct ena_adapter *adapter, u32 rx_copybreak)
{
	struct ena_ring *rx_ring;
	int i;

	if (rx_copybreak > min_t(u16, adapter->netdev->mtu, ENA_PAGE_SIZE))
		return -EINVAL;

	adapter->rx_copybreak = rx_copybreak;

	for (i = 0; i < adapter->num_io_queues; i++) {
		rx_ring = &adapter->rx_ring[i];
		rx_ring->rx_copybreak = rx_copybreak;
	}

	return 0;
}
int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int prev_channel_count;
	bool dev_was_up;

	dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
	ena_close(adapter->netdev);
	prev_channel_count = adapter->num_io_queues;
	adapter->num_io_queues = new_channel_count;
	if (ena_xdp_present(adapter) &&
	    ena_xdp_allowed(adapter) == ENA_XDP_ALLOWED) {
		adapter->xdp_first_ring = new_channel_count;
		adapter->xdp_num_queues = new_channel_count;
		if (prev_channel_count > new_channel_count)
			ena_xdp_exchange_program_rx_in_range(adapter,
							     NULL,
							     new_channel_count,
							     prev_channel_count);
		else
			ena_xdp_exchange_program_rx_in_range(adapter,
							     adapter->xdp_bpf_prog,
							     prev_channel_count,
							     new_channel_count);
	}

	/* We need to destroy the rss table so that the indirection
	 * table will be reinitialized by ena_up()
	 */
	ena_com_rss_destroy(ena_dev);
	ena_init_io_rings(adapter,
			  0,
			  adapter->xdp_num_queues +
			  adapter->num_io_queues);
	return dev_was_up ? ena_open(adapter->netdev) : 0;
}
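/* Ring layout note: XDP TX rings are placed directly after the regular
 * rings, so after a channel count change xdp_first_ring always equals
 * num_io_queues and both regions are re-initialized together by
 * ena_init_io_rings().
 */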
static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx,
			struct sk_buff *skb,
			bool disable_meta_caching)
{
	u32 mss = skb_shinfo(skb)->gso_size;
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
	u8 l4_protocol = 0;

	if ((skb->ip_summed == CHECKSUM_PARTIAL) || mss) {
		ena_tx_ctx->l4_csum_enable = 1;
		if (mss) {
			ena_tx_ctx->tso_enable = 1;
			ena_meta->l4_hdr_len = tcp_hdr(skb)->doff;
			ena_tx_ctx->l4_csum_partial = 0;
		} else {
			ena_tx_ctx->tso_enable = 0;
			ena_meta->l4_hdr_len = 0;
			ena_tx_ctx->l4_csum_partial = 1;
		}

		switch (ip_hdr(skb)->version) {
		case IPVERSION:
			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;
			if (ip_hdr(skb)->frag_off & htons(IP_DF))
				ena_tx_ctx->df = 1;
			if (mss)
				ena_tx_ctx->l3_csum_enable = 1;
			l4_protocol = ip_hdr(skb)->protocol;
			break;
		case 6:
			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
			l4_protocol = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			break;
		}

		if (l4_protocol == IPPROTO_TCP)
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
		else
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;

		ena_meta->mss = mss;
		ena_meta->l3_hdr_len = skb_network_header_len(skb);
		ena_meta->l3_hdr_offset = skb_network_offset(skb);
		ena_tx_ctx->meta_valid = 1;
	} else if (disable_meta_caching) {
		memset(ena_meta, 0, sizeof(*ena_meta));
		ena_tx_ctx->meta_valid = 1;
	} else {
		ena_tx_ctx->meta_valid = 0;
	}
}
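/* For example, a CHECKSUM_PARTIAL TCP/IPv4 skb with gso_size == 1448 takes
 * the TSO branch above (tso_enable = 1, l4_hdr_len taken from the TCP
 * header), while the same skb without GSO only requests a partial L4
 * checksum (l4_csum_partial = 1) and leaves TSO disabled.
 */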
static int ena_check_and_linearize_skb(struct ena_ring *tx_ring,
				       struct sk_buff *skb)
{
	int num_frags, header_len, rc;

	num_frags = skb_shinfo(skb)->nr_frags;
	header_len = skb_headlen(skb);

	if (num_frags < tx_ring->sgl_size)
		return 0;

	if ((num_frags == tx_ring->sgl_size) &&
	    (header_len < tx_ring->tx_max_header_size))
		return 0;

	ena_increase_stat(&tx_ring->tx_stats.linearize, 1, &tx_ring->syncp);

	rc = skb_linearize(skb);
	if (unlikely(rc)) {
		ena_increase_stat(&tx_ring->tx_stats.linearize_failed, 1,
				  &tx_ring->syncp);
	}

	return rc;
}
static int ena_tx_map_skb(struct ena_ring *tx_ring,
			  struct ena_tx_buffer *tx_info,
			  struct sk_buff *skb,
			  void **push_hdr,
			  u16 *header_len)
{
	struct ena_adapter *adapter = tx_ring->adapter;
	struct ena_com_buf *ena_buf;
	dma_addr_t dma;
	u32 skb_head_len, frag_len, last_frag;
	u16 push_len = 0;
	u16 delta = 0;
	int i = 0;

	skb_head_len = skb_headlen(skb);
	tx_info->skb = skb;
	ena_buf = tx_info->bufs;

	if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		/* When the device is in LLQ mode, the driver will copy
		 * the header into the device memory space.
		 * The ena_com layer assumes the header is in a linear
		 * memory space.
		 * This assumption might be wrong since part of the header
		 * can be in the fragmented buffers.
		 * Use skb_header_pointer to make sure the header is in a
		 * linear memory space.
		 */
		push_len = min_t(u32, skb->len, tx_ring->tx_max_header_size);
		*push_hdr = skb_header_pointer(skb, 0, push_len,
					       tx_ring->push_buf_intermediate_buf);
		*header_len = push_len;
		if (unlikely(skb->data != *push_hdr)) {
			ena_increase_stat(&tx_ring->tx_stats.llq_buffer_copy, 1,
					  &tx_ring->syncp);

			delta = push_len - skb_head_len;
		}
	} else {
		*push_hdr = NULL;
		*header_len = min_t(u32, skb_head_len,
				    tx_ring->tx_max_header_size);
	}

	netif_dbg(adapter, tx_queued, adapter->netdev,
		  "skb: %p header_buf->vaddr: %p push_len: %d\n", skb,
		  *push_hdr, push_len);

	/* map skb linear data */
	if (skb_head_len > push_len) {
		dma = dma_map_single(tx_ring->dev, skb->data + push_len,
				     skb_head_len - push_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
			goto error_report_dma_error;

		ena_buf->paddr = dma;
		ena_buf->len = skb_head_len - push_len;
		ena_buf++;
		tx_info->num_of_bufs++;
		tx_info->map_linear_data = 1;
	} else {
		tx_info->map_linear_data = 0;
	}

	last_frag = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < last_frag; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		frag_len = skb_frag_size(frag);

		if (unlikely(delta >= frag_len)) {
			delta -= frag_len;
			continue;
		}

		dma = skb_frag_dma_map(tx_ring->dev, frag, delta,
				       frag_len - delta, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
			goto error_report_dma_error;

		ena_buf->paddr = dma;
		ena_buf->len = frag_len - delta;
		ena_buf++;
		tx_info->num_of_bufs++;
		delta = 0;
	}

	return 0;

error_report_dma_error:
	ena_increase_stat(&tx_ring->tx_stats.dma_mapping_err, 1,
			  &tx_ring->syncp);
	netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map skb\n");

	tx_info->skb = NULL;

	tx_info->num_of_bufs += i;
	ena_unmap_tx_buff(tx_ring, tx_info);

	return -EINVAL;
}
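/* LLQ delta example: with tx_max_header_size == 96 and skb_headlen() == 64,
 * push_len is 96, so 32 bytes of the first fragment were already pushed
 * together with the header. delta == 32 then makes the first fragment
 * mapping start that far into the fragment (or skip fragments entirely
 * while they are shorter than the remaining delta).
 */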
/* Called with netif_tx_lock. */
static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	struct ena_tx_buffer *tx_info;
	struct ena_com_tx_ctx ena_tx_ctx;
	struct ena_ring *tx_ring;
	struct netdev_queue *txq;
	void *push_hdr;
	u16 next_to_use, req_id, header_len;
	int qid, rc;

	netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb);
	/* Determine which tx ring we will be placed on */
	qid = skb_get_queue_mapping(skb);
	tx_ring = &adapter->tx_ring[qid];
	txq = netdev_get_tx_queue(dev, qid);

	rc = ena_check_and_linearize_skb(tx_ring, skb);
	if (unlikely(rc))
		goto error_drop_packet;

	skb_tx_timestamp(skb);

	next_to_use = tx_ring->next_to_use;
	req_id = tx_ring->free_ids[next_to_use];
	tx_info = &tx_ring->tx_buffer_info[req_id];
	tx_info->num_of_bufs = 0;

	WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id);

	rc = ena_tx_map_skb(tx_ring, tx_info, skb, &push_hdr, &header_len);
	if (unlikely(rc))
		goto error_drop_packet;

	memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
	ena_tx_ctx.ena_bufs = tx_info->bufs;
	ena_tx_ctx.push_header = push_hdr;
	ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
	ena_tx_ctx.req_id = req_id;
	ena_tx_ctx.header_len = header_len;

	/* set flags and meta data */
	ena_tx_csum(&ena_tx_ctx, skb, tx_ring->disable_meta_caching);

	rc = ena_xmit_common(dev,
			     tx_ring,
			     tx_info,
			     &ena_tx_ctx,
			     next_to_use,
			     skb->len);
	if (rc)
		goto error_unmap_dma;

	netdev_tx_sent_queue(txq, skb->len);

	/* stop the queue when no more space available, the packet can have up
	 * to sgl_size + 2: one for the meta descriptor and one for the header
	 * (if the header is larger than tx_max_header_size).
	 */
	if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
						   tx_ring->sgl_size + 2))) {
		netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n",
			  __func__, qid);

		netif_tx_stop_queue(txq);
		ena_increase_stat(&tx_ring->tx_stats.queue_stop, 1,
				  &tx_ring->syncp);

		/* There is a rare condition where this function decides to
		 * stop the queue but meanwhile clean_tx_irq updates
		 * next_to_completion and terminates.
		 * The queue will remain stopped forever.
		 * To solve this issue add a mb() to make sure that
		 * netif_tx_stop_queue() write is visible before checking if
		 * there is additional space in the queue.
		 */
		smp_mb();

		if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
						 ENA_TX_WAKEUP_THRESH)) {
			netif_tx_wake_queue(txq);
			ena_increase_stat(&tx_ring->tx_stats.queue_wakeup, 1,
					  &tx_ring->syncp);
		}
	}

	if (netif_xmit_stopped(txq) || !netdev_xmit_more())
		/* trigger the dma engine. ena_ring_tx_doorbell()
		 * calls a memory barrier inside it.
		 */
		ena_ring_tx_doorbell(tx_ring);

	return NETDEV_TX_OK;

error_unmap_dma:
	ena_unmap_tx_buff(tx_ring, tx_info);
	tx_info->skb = NULL;

error_drop_packet:
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
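/* Descriptor budget behind the sgl_size + 2 check above: one packet can
 * consume up to sgl_size buffer descriptors, plus one meta descriptor,
 * plus one extra descriptor when the header exceeds tx_max_header_size.
 */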
static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
			    struct net_device *sb_dev)
{
	u16 qid;
	/* we suspect that this is good for in-kernel network services that
	 * want to loop incoming skb rx to tx in normal user generated traffic,
	 * most probably we will not get to this
	 */
	if (skb_rx_queue_recorded(skb))
		qid = skb_get_rx_queue(skb);
	else
		qid = netdev_pick_tx(dev, skb, NULL);

	return qid;
}
static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct ena_admin_host_info *host_info;
	int rc;

	/* Allocate only the host info */
	rc = ena_com_allocate_host_info(ena_dev);
	if (rc) {
		dev_err(dev, "Cannot allocate host info\n");
		return;
	}

	host_info = ena_dev->host_attr.host_info;

	host_info->bdf = pci_dev_id(pdev);
	host_info->os_type = ENA_ADMIN_OS_LINUX;
	host_info->kernel_ver = LINUX_VERSION_CODE;
	strscpy(host_info->kernel_ver_str, utsname()->version,
		sizeof(host_info->kernel_ver_str) - 1);
	host_info->os_dist = 0;
	strncpy(host_info->os_dist_str, utsname()->release,
		sizeof(host_info->os_dist_str) - 1);
	host_info->driver_version =
		(DRV_MODULE_GEN_MAJOR) |
		(DRV_MODULE_GEN_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
		(DRV_MODULE_GEN_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT) |
		("K"[0] << ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT);
	host_info->num_cpus = num_online_cpus();

	host_info->driver_supported_features =
		ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK |
		ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK |
		ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_MASK |
		ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK |
		ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_MASK;

	rc = ena_com_set_host_attributes(ena_dev);
	if (rc) {
		if (rc == -EOPNOTSUPP)
			dev_warn(dev, "Cannot set host attributes\n");
		else
			dev_err(dev, "Cannot set host attributes\n");

		goto err;
	}

	return;

err:
	ena_com_delete_host_info(ena_dev);
}
static void ena_config_debug_area(struct ena_adapter *adapter)
{
	u32 debug_area_size;
	int rc, ss_count;

	ss_count = ena_get_sset_count(adapter->netdev, ETH_SS_STATS);
	if (ss_count <= 0) {
		netif_err(adapter, drv, adapter->netdev,
			  "SS count is negative\n");
		return;
	}

	/* allocate 32 bytes for each string and 64bit for the value */
	debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;

	rc = ena_com_allocate_debug_area(adapter->ena_dev, debug_area_size);
	if (rc) {
		netif_err(adapter, drv, adapter->netdev,
			  "Cannot allocate debug area\n");
		return;
	}

	rc = ena_com_set_host_attributes(adapter->ena_dev);
	if (rc) {
		if (rc == -EOPNOTSUPP)
			netif_warn(adapter, drv, adapter->netdev,
				   "Cannot set host attributes\n");
		else
			netif_err(adapter, drv, adapter->netdev,
				  "Cannot set host attributes\n");
		goto err;
	}

	return;
err:
	ena_com_delete_debug_area(adapter->ena_dev);
}
int ena_update_hw_stats(struct ena_adapter *adapter)
{
	int rc;

	rc = ena_com_get_eni_stats(adapter->ena_dev, &adapter->eni_stats);
	if (rc) {
		netdev_err(adapter->netdev, "Failed to get ENI stats\n");
		return rc;
	}

	return 0;
}
static void ena_get_stats64(struct net_device *netdev,
			    struct rtnl_link_stats64 *stats)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	struct ena_ring *rx_ring, *tx_ring;
	unsigned int start;
	u64 rx_drops;
	u64 tx_drops;
	int i;

	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
		return;

	for (i = 0; i < adapter->num_io_queues; i++) {
		u64 bytes, packets;

		tx_ring = &adapter->tx_ring[i];

		do {
			start = u64_stats_fetch_begin(&tx_ring->syncp);
			packets = tx_ring->tx_stats.cnt;
			bytes = tx_ring->tx_stats.bytes;
		} while (u64_stats_fetch_retry(&tx_ring->syncp, start));

		stats->tx_packets += packets;
		stats->tx_bytes += bytes;

		rx_ring = &adapter->rx_ring[i];

		do {
			start = u64_stats_fetch_begin(&rx_ring->syncp);
			packets = rx_ring->rx_stats.cnt;
			bytes = rx_ring->rx_stats.bytes;
		} while (u64_stats_fetch_retry(&rx_ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes += bytes;
	}

	do {
		start = u64_stats_fetch_begin(&adapter->syncp);
		rx_drops = adapter->dev_stats.rx_drops;
		tx_drops = adapter->dev_stats.tx_drops;
	} while (u64_stats_fetch_retry(&adapter->syncp, start));

	stats->rx_dropped = rx_drops;
	stats->tx_dropped = tx_drops;

	stats->multicast = 0;
	stats->collisions = 0;

	stats->rx_length_errors = 0;
	stats->rx_crc_errors = 0;
	stats->rx_frame_errors = 0;
	stats->rx_fifo_errors = 0;
	stats->rx_missed_errors = 0;
	stats->tx_window_errors = 0;

	stats->rx_errors = 0;
	stats->tx_errors = 0;
}
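/* The fetch/retry loops above give a consistent packets/bytes snapshot on
 * 32-bit kernels, where the 64-bit counters are protected by a seqcount;
 * on 64-bit kernels u64_stats_fetch_begin()/retry() compile down to plain
 * reads.
 */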
static const struct net_device_ops ena_netdev_ops = {
	.ndo_open		= ena_open,
	.ndo_stop		= ena_close,
	.ndo_start_xmit		= ena_start_xmit,
	.ndo_select_queue	= ena_select_queue,
	.ndo_get_stats64	= ena_get_stats64,
	.ndo_tx_timeout		= ena_tx_timeout,
	.ndo_change_mtu		= ena_change_mtu,
	.ndo_set_mac_address	= NULL,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_bpf		= ena_xdp,
	.ndo_xdp_xmit		= ena_xdp_xmit,
};
static void ena_calc_io_queue_size(struct ena_adapter *adapter,
				   struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	struct ena_admin_feature_llq_desc *llq = &get_feat_ctx->llq;
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	u32 tx_queue_size = ENA_DEFAULT_RING_SIZE;
	u32 rx_queue_size = ENA_DEFAULT_RING_SIZE;
	u32 max_tx_queue_size;
	u32 max_rx_queue_size;

	/* If this function is called after driver load, the ring sizes have already
	 * been configured. Take it into account when recalculating ring size.
	 */
	if (adapter->tx_ring->ring_size)
		tx_queue_size = adapter->tx_ring->ring_size;

	if (adapter->rx_ring->ring_size)
		rx_queue_size = adapter->rx_ring->ring_size;

	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
			&get_feat_ctx->max_queue_ext.max_queue_ext;
		max_rx_queue_size = min_t(u32, max_queue_ext->max_rx_cq_depth,
					  max_queue_ext->max_rx_sq_depth);
		max_tx_queue_size = max_queue_ext->max_tx_cq_depth;

		if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
			max_tx_queue_size = min_t(u32, max_tx_queue_size,
						  llq->max_llq_depth);
		else
			max_tx_queue_size = min_t(u32, max_tx_queue_size,
						  max_queue_ext->max_tx_sq_depth);

		adapter->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
						 max_queue_ext->max_per_packet_tx_descs);
		adapter->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
						 max_queue_ext->max_per_packet_rx_descs);
	} else {
		struct ena_admin_queue_feature_desc *max_queues =
			&get_feat_ctx->max_queues;
		max_rx_queue_size = min_t(u32, max_queues->max_cq_depth,
					  max_queues->max_sq_depth);
		max_tx_queue_size = max_queues->max_cq_depth;

		if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
			max_tx_queue_size = min_t(u32, max_tx_queue_size,
						  llq->max_llq_depth);
		else
			max_tx_queue_size = min_t(u32, max_tx_queue_size,
						  max_queues->max_sq_depth);

		adapter->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
						 max_queues->max_packet_tx_descs);
		adapter->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
						 max_queues->max_packet_rx_descs);
	}

	max_tx_queue_size = rounddown_pow_of_two(max_tx_queue_size);
	max_rx_queue_size = rounddown_pow_of_two(max_rx_queue_size);

	/* When forcing large headers, we multiply the entry size by 2, and therefore divide
	 * the queue size by 2, leaving the amount of memory used by the queues unchanged.
	 */
	if (adapter->large_llq_header_enabled) {
		if ((llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B) &&
		    ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
			max_tx_queue_size /= 2;
			dev_info(&adapter->pdev->dev,
				 "Forcing large headers and decreasing maximum TX queue size to %d\n",
				 max_tx_queue_size);
		} else {
			dev_err(&adapter->pdev->dev,
				"Forcing large headers failed: LLQ is disabled or device does not support large headers\n");

			adapter->large_llq_header_enabled = false;
		}
	}

	tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE,
				  max_tx_queue_size);
	rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE,
				  max_rx_queue_size);

	tx_queue_size = rounddown_pow_of_two(tx_queue_size);
	rx_queue_size = rounddown_pow_of_two(rx_queue_size);

	adapter->max_tx_ring_size = max_tx_queue_size;
	adapter->max_rx_ring_size = max_rx_queue_size;
	adapter->requested_tx_ring_size = tx_queue_size;
	adapter->requested_rx_ring_size = rx_queue_size;
}
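/* For example, a device reporting a maximum TX depth of 1024 ends up with
 * max_tx_queue_size == 512 when large LLQ headers are in effect: entries
 * are twice as large, so half as many fit in the same device memory.
 */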
static int ena_device_validate_params(struct ena_adapter *adapter,
				      struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	rc = ether_addr_equal(get_feat_ctx->dev_attr.mac_addr,
			      netdev->dev_addr);
	if (!rc) {
		netif_err(adapter, drv, netdev,
			  "Error, mac addresses differ\n");
		return -EINVAL;
	}

	if (get_feat_ctx->dev_attr.max_mtu < netdev->mtu) {
		netif_err(adapter, drv, netdev,
			  "Error, device max mtu is smaller than netdev MTU\n");
		return -EINVAL;
	}

	return 0;
}
static void set_default_llq_configurations(struct ena_adapter *adapter,
					   struct ena_llq_configurations *llq_config,
					   struct ena_admin_feature_llq_desc *llq)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;

	llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
	llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
	llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;

	adapter->large_llq_header_supported =
		!!(ena_dev->supported_features & BIT(ENA_ADMIN_LLQ));
	adapter->large_llq_header_supported &=
		!!(llq->entry_size_ctrl_supported &
		   ENA_ADMIN_LIST_ENTRY_SIZE_256B);

	if ((llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B) &&
	    adapter->large_llq_header_enabled) {
		llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
		llq_config->llq_ring_entry_size_value = 256;
	} else {
		llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
		llq_config->llq_ring_entry_size_value = 128;
	}
}
static int ena_set_queues_placement_policy(struct pci_dev *pdev,
					   struct ena_com_dev *ena_dev,
					   struct ena_admin_feature_llq_desc *llq,
					   struct ena_llq_configurations *llq_default_configurations)
{
	int rc;
	u32 llq_feature_mask;

	llq_feature_mask = 1 << ENA_ADMIN_LLQ;
	if (!(ena_dev->supported_features & llq_feature_mask)) {
		dev_warn(&pdev->dev,
			 "LLQ is not supported. Fallback to host mode policy.\n");
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
		return 0;
	}

	if (!ena_dev->mem_bar) {
		netdev_err(ena_dev->net_device,
			   "LLQ is advertised as supported but device doesn't expose mem bar\n");
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
		return 0;
	}

	rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
	if (unlikely(rc)) {
		dev_err(&pdev->dev,
			"Failed to configure the device mode. Fallback to host mode policy.\n");
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
	}

	return 0;
}
static int ena_map_llq_mem_bar(struct pci_dev *pdev, struct ena_com_dev *ena_dev,
			       int bars)
{
	bool has_mem_bar = !!(bars & BIT(ENA_MEM_BAR));

	if (!has_mem_bar)
		return 0;

	ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
					   pci_resource_start(pdev, ENA_MEM_BAR),
					   pci_resource_len(pdev, ENA_MEM_BAR));

	if (!ena_dev->mem_bar)
		return -EFAULT;

	return 0;
}
static int ena_device_init(struct ena_adapter *adapter, struct pci_dev *pdev,
			   struct ena_com_dev_get_features_ctx *get_feat_ctx,
			   bool *wd_state)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	struct ena_llq_configurations llq_config;
	struct device *dev = &pdev->dev;
	bool readless_supported;
	u32 aenq_groups;
	int dma_width;
	int rc;

	rc = ena_com_mmio_reg_read_request_init(ena_dev);
	if (rc) {
		dev_err(dev, "Failed to init mmio read less\n");
		return rc;
	}

	/* The PCIe configuration space revision id indicates if mmio reg
	 * read is disabled
	 */
	readless_supported = !(pdev->revision & ENA_MMIO_DISABLE_REG_READ);
	ena_com_set_mmio_read_mode(ena_dev, readless_supported);

	rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
	if (rc) {
		dev_err(dev, "Can not reset device\n");
		goto err_mmio_read_less;
	}

	rc = ena_com_validate_version(ena_dev);
	if (rc) {
		dev_err(dev, "Device version is too low\n");
		goto err_mmio_read_less;
	}

	dma_width = ena_com_get_dma_width(ena_dev);
	if (dma_width < 0) {
		dev_err(dev, "Invalid dma width value %d", dma_width);
		rc = dma_width;
		goto err_mmio_read_less;
	}

	rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(dma_width));
	if (rc) {
		dev_err(dev, "dma_set_mask_and_coherent failed %d\n", rc);
		goto err_mmio_read_less;
	}

	/* ENA admin level init */
	rc = ena_com_admin_init(ena_dev, &aenq_handlers);
	if (rc) {
		dev_err(dev,
			"Can not initialize ena admin queue with device\n");
		goto err_mmio_read_less;
	}

	/* To enable the msix interrupts the driver needs to know the number
	 * of queues. So the driver uses polling mode to retrieve this
	 * information
	 */
	ena_com_set_admin_polling_mode(ena_dev, true);

	ena_config_host_info(ena_dev, pdev);

	/* Get Device Attributes */
	rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
	if (rc) {
		dev_err(dev, "Cannot get attribute for ena device rc=%d\n", rc);
		goto err_admin_init;
	}

	/* Try to turn on all the available aenq groups */
	aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
		      BIT(ENA_ADMIN_FATAL_ERROR) |
		      BIT(ENA_ADMIN_WARNING) |
		      BIT(ENA_ADMIN_NOTIFICATION) |
		      BIT(ENA_ADMIN_KEEP_ALIVE);

	aenq_groups &= get_feat_ctx->aenq.supported_groups;

	rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
	if (rc) {
		dev_err(dev, "Cannot configure aenq groups rc= %d\n", rc);
		goto err_admin_init;
	}

	*wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));

	set_default_llq_configurations(adapter, &llq_config, &get_feat_ctx->llq);

	rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx->llq,
					     &llq_config);
	if (rc) {
		dev_err(dev, "ENA device init failed\n");
		goto err_admin_init;
	}

	ena_calc_io_queue_size(adapter, get_feat_ctx);

	return 0;

err_admin_init:
	ena_com_delete_host_info(ena_dev);
	ena_com_admin_destroy(ena_dev);
err_mmio_read_less:
	ena_com_mmio_reg_read_request_destroy(ena_dev);

	return rc;
}
static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	struct device *dev = &adapter->pdev->dev;
	int rc;

	rc = ena_enable_msix(adapter);
	if (rc) {
		dev_err(dev, "Can not reserve msix vectors\n");
		return rc;
	}

	ena_setup_mgmnt_intr(adapter);

	rc = ena_request_mgmnt_irq(adapter);
	if (rc) {
		dev_err(dev, "Can not setup management interrupts\n");
		goto err_disable_msix;
	}

	ena_com_set_admin_polling_mode(ena_dev, false);

	ena_com_admin_aenq_enable(ena_dev);

	return 0;

err_disable_msix:
	ena_disable_msix(adapter);

	return rc;
}
static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
{
	struct net_device *netdev = adapter->netdev;
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	bool dev_up;

	if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
		return;

	netif_carrier_off(netdev);

	del_timer_sync(&adapter->timer_service);

	dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
	adapter->dev_up_before_reset = dev_up;
	if (!graceful)
		ena_com_set_admin_running_state(ena_dev, false);

	if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
		ena_down(adapter);

	/* Stop the device from sending AENQ events (in case reset flag is set
	 * and device is up, ena_down() already reset the device.
	 */
	if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))
		ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);

	ena_free_mgmnt_irq(adapter);

	ena_disable_msix(adapter);

	ena_com_abort_admin_commands(ena_dev);

	ena_com_wait_for_abort_completion(ena_dev);

	ena_com_admin_destroy(ena_dev);

	ena_com_mmio_reg_read_request_destroy(ena_dev);

	/* return reset reason to default value */
	adapter->reset_reason = ENA_REGS_RESET_NORMAL;

	clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
	clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
}
static int ena_restore_device(struct ena_adapter *adapter)
{
	struct ena_com_dev_get_features_ctx get_feat_ctx;
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	struct pci_dev *pdev = adapter->pdev;
	struct ena_ring *txr;
	int rc, count, i;
	bool wd_state;

	set_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
	rc = ena_device_init(adapter, adapter->pdev, &get_feat_ctx, &wd_state);
	if (rc) {
		dev_err(&pdev->dev, "Can not initialize device\n");
		goto err;
	}
	adapter->wd_state = wd_state;

	count = adapter->xdp_num_queues + adapter->num_io_queues;
	for (i = 0 ; i < count; i++) {
		txr = &adapter->tx_ring[i];
		txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
		txr->tx_max_header_size = ena_dev->tx_max_header_size;
	}

	rc = ena_device_validate_params(adapter, &get_feat_ctx);
	if (rc) {
		dev_err(&pdev->dev, "Validation of device parameters failed\n");
		goto err_device_destroy;
	}

	rc = ena_enable_msix_and_set_admin_interrupts(adapter);
	if (rc) {
		dev_err(&pdev->dev, "Enable MSI-X failed\n");
		goto err_device_destroy;
	}
	/* If the interface was up before the reset bring it up */
	if (adapter->dev_up_before_reset) {
		rc = ena_up(adapter);
		if (rc) {
			dev_err(&pdev->dev, "Failed to create I/O queues\n");
			goto err_disable_msix;
		}
	}

	set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);

	clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
	if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
		netif_carrier_on(adapter->netdev);

	mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
	adapter->last_keep_alive_jiffies = jiffies;

	return rc;
err_disable_msix:
	ena_free_mgmnt_irq(adapter);
	ena_disable_msix(adapter);
err_device_destroy:
	ena_com_abort_admin_commands(ena_dev);
	ena_com_wait_for_abort_completion(ena_dev);
	ena_com_admin_destroy(ena_dev);
	ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
	ena_com_mmio_reg_read_request_destroy(ena_dev);
err:
	clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
	clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
	dev_err(&pdev->dev,
		"Reset attempt failed. Can not reset the device\n");

	return rc;
}
static void ena_fw_reset_device(struct work_struct *work)
{
	struct ena_adapter *adapter =
		container_of(work, struct ena_adapter, reset_task);

	rtnl_lock();

	if (likely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
		ena_destroy_device(adapter, false);
		ena_restore_device(adapter);

		dev_err(&adapter->pdev->dev, "Device reset completed successfully\n");
	}

	rtnl_unlock();
}
static int check_for_rx_interrupt_queue(struct ena_adapter *adapter,
					struct ena_ring *rx_ring)
{
	struct ena_napi *ena_napi = container_of(rx_ring->napi, struct ena_napi, napi);

	if (likely(READ_ONCE(ena_napi->first_interrupt)))
		return 0;

	if (ena_com_cq_empty(rx_ring->ena_com_io_cq))
		return 0;

	rx_ring->no_interrupt_event_cnt++;

	if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) {
		netif_err(adapter, rx_err, adapter->netdev,
			  "Potential MSIX issue on Rx side Queue = %d. Reset the device\n",
			  rx_ring->qid);

		ena_reset_device(adapter, ENA_REGS_RESET_MISS_INTERRUPT);
		return -EIO;
	}

	return 0;
}
static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
					  struct ena_ring *tx_ring)
{
	struct ena_napi *ena_napi = container_of(tx_ring->napi, struct ena_napi, napi);
	unsigned int time_since_last_napi;
	unsigned int missing_tx_comp_to;
	bool is_tx_comp_time_expired;
	struct ena_tx_buffer *tx_buf;
	unsigned long last_jiffies;
	u32 missed_tx = 0;
	int i, rc = 0;

	for (i = 0; i < tx_ring->ring_size; i++) {
		tx_buf = &tx_ring->tx_buffer_info[i];
		last_jiffies = tx_buf->last_jiffies;

		if (last_jiffies == 0)
			/* no pending Tx at this location */
			continue;

		is_tx_comp_time_expired = time_is_before_jiffies(last_jiffies +
			2 * adapter->missing_tx_completion_to);

		if (unlikely(!READ_ONCE(ena_napi->first_interrupt) && is_tx_comp_time_expired)) {
			/* If after graceful period interrupt is still not
			 * received, we schedule a reset
			 */
			netif_err(adapter, tx_err, adapter->netdev,
				  "Potential MSIX issue on Tx side Queue = %d. Reset the device\n",
				  tx_ring->qid);
			ena_reset_device(adapter, ENA_REGS_RESET_MISS_INTERRUPT);
			return -EIO;
		}

		is_tx_comp_time_expired = time_is_before_jiffies(last_jiffies +
			adapter->missing_tx_completion_to);

		if (unlikely(is_tx_comp_time_expired)) {
			if (!tx_buf->print_once) {
				time_since_last_napi = jiffies_to_usecs(jiffies - tx_ring->tx_stats.last_napi_jiffies);
				missing_tx_comp_to = jiffies_to_msecs(adapter->missing_tx_completion_to);
				netif_notice(adapter, tx_err, adapter->netdev,
					     "Found a Tx that wasn't completed on time, qid %d, index %d. %u usecs have passed since last napi execution. Missing Tx timeout value %u msecs\n",
					     tx_ring->qid, i, time_since_last_napi, missing_tx_comp_to);
			}

			tx_buf->print_once = 1;
			missed_tx++;
		}
	}

	if (unlikely(missed_tx > adapter->missing_tx_completion_threshold)) {
		netif_err(adapter, tx_err, adapter->netdev,
			  "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n",
			  missed_tx,
			  adapter->missing_tx_completion_threshold);
		ena_reset_device(adapter, ENA_REGS_RESET_MISS_TX_CMPL);
		rc = -EIO;
	}

	ena_increase_stat(&tx_ring->tx_stats.missed_tx, missed_tx,
			  &tx_ring->syncp);

	return rc;
}
static void check_for_missing_completions(struct ena_adapter *adapter)
{
	struct ena_ring *tx_ring;
	struct ena_ring *rx_ring;
	int i, budget, rc;
	int io_queue_count;

	io_queue_count = adapter->xdp_num_queues + adapter->num_io_queues;
	/* Make sure the driver isn't tearing the device down in another process */
	smp_rmb();

	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
		return;

	if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
		return;

	if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT)
		return;

	budget = ENA_MONITORED_TX_QUEUES;

	for (i = adapter->last_monitored_tx_qid; i < io_queue_count; i++) {
		tx_ring = &adapter->tx_ring[i];
		rx_ring = &adapter->rx_ring[i];

		rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
		if (unlikely(rc))
			return;

		rc = !ENA_IS_XDP_INDEX(adapter, i) ?
			check_for_rx_interrupt_queue(adapter, rx_ring) : 0;
		if (unlikely(rc))
			return;

		budget--;
		if (!budget)
			break;
	}

	adapter->last_monitored_tx_qid = i % io_queue_count;
}
/* trigger napi schedule after 2 consecutive detections */
#define EMPTY_RX_REFILL 2
/* For the rare case where the device runs out of Rx descriptors and the
 * napi handler failed to refill new Rx descriptors (due to a lack of memory
 * for example).
 * This case will lead to a deadlock:
 * The device won't send interrupts since all the new Rx packets will be dropped
 * The napi handler won't allocate new Rx descriptors so the device won't be
 * able to send new packets.
 *
 * This scenario can happen when the kernel's vm.min_free_kbytes is too small.
 * It is recommended to have at least 512MB, with a minimum of 128MB for
 * constrained environments.
 *
 * When such a situation is detected - Reschedule napi
 */
static void check_for_empty_rx_ring(struct ena_adapter *adapter)
{
	struct ena_ring *rx_ring;
	int i, refill_required;

	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
		return;

	if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
		return;

	for (i = 0; i < adapter->num_io_queues; i++) {
		rx_ring = &adapter->rx_ring[i];

		refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
		if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
			rx_ring->empty_rx_queue++;

			if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
				ena_increase_stat(&rx_ring->rx_stats.empty_rx_ring, 1,
						  &rx_ring->syncp);

				netif_err(adapter, drv, adapter->netdev,
					  "Trigger refill for ring %d\n", i);

				napi_schedule(rx_ring->napi);
				rx_ring->empty_rx_queue = 0;
			}
		} else {
			rx_ring->empty_rx_queue = 0;
		}
	}
}
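/* The timer service re-arms itself roughly once a second (see
 * ena_timer_service()), so EMPTY_RX_REFILL == 2 means napi is rescheduled
 * only after the ring has looked starved on two consecutive ~1s checks.
 */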
/* Check for keep alive expiration */
static void check_for_missing_keep_alive(struct ena_adapter *adapter)
{
	unsigned long keep_alive_expired;

	if (!adapter->wd_state)
		return;

	if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
		return;

	keep_alive_expired = adapter->last_keep_alive_jiffies +
			     adapter->keep_alive_timeout;
	if (unlikely(time_is_before_jiffies(keep_alive_expired))) {
		netif_err(adapter, drv, adapter->netdev,
			  "Keep alive watchdog timeout.\n");
		ena_increase_stat(&adapter->dev_stats.wd_expired, 1,
				  &adapter->syncp);
		ena_reset_device(adapter, ENA_REGS_RESET_KEEP_ALIVE_TO);
	}
}
static void check_for_admin_com_state(struct ena_adapter *adapter)
{
	if (unlikely(!ena_com_get_admin_running_state(adapter->ena_dev))) {
		netif_err(adapter, drv, adapter->netdev,
			  "ENA admin queue is not in running state!\n");
		ena_increase_stat(&adapter->dev_stats.admin_q_pause, 1,
				  &adapter->syncp);
		ena_reset_device(adapter, ENA_REGS_RESET_ADMIN_TO);
	}
}
static void ena_update_hints(struct ena_adapter *adapter,
			     struct ena_admin_ena_hw_hints *hints)
{
	struct net_device *netdev = adapter->netdev;

	if (hints->admin_completion_tx_timeout)
		adapter->ena_dev->admin_queue.completion_timeout =
			hints->admin_completion_tx_timeout * 1000;

	if (hints->mmio_read_timeout)
		/* convert to usec */
		adapter->ena_dev->mmio_read.reg_read_to =
			hints->mmio_read_timeout * 1000;

	if (hints->missed_tx_completion_count_threshold_to_reset)
		adapter->missing_tx_completion_threshold =
			hints->missed_tx_completion_count_threshold_to_reset;

	if (hints->missing_tx_completion_timeout) {
		if (hints->missing_tx_completion_timeout == ENA_HW_HINTS_NO_TIMEOUT)
			adapter->missing_tx_completion_to = ENA_HW_HINTS_NO_TIMEOUT;
		else
			adapter->missing_tx_completion_to =
				msecs_to_jiffies(hints->missing_tx_completion_timeout);
	}

	if (hints->netdev_wd_timeout)
		netdev->watchdog_timeo = msecs_to_jiffies(hints->netdev_wd_timeout);

	if (hints->driver_watchdog_timeout) {
		if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
			adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
		else
			adapter->keep_alive_timeout =
				msecs_to_jiffies(hints->driver_watchdog_timeout);
	}
}
static void ena_update_host_info(struct ena_admin_host_info *host_info,
				 struct net_device *netdev)
{
	host_info->supported_network_features[0] =
		netdev->features & GENMASK_ULL(31, 0);
	host_info->supported_network_features[1] =
		(netdev->features & GENMASK_ULL(63, 32)) >> 32;
}
static void ena_timer_service(struct timer_list *t)
{
	struct ena_adapter *adapter = from_timer(adapter, t, timer_service);
	u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr;
	struct ena_admin_host_info *host_info =
		adapter->ena_dev->host_attr.host_info;

	check_for_missing_keep_alive(adapter);

	check_for_admin_com_state(adapter);

	check_for_missing_completions(adapter);

	check_for_empty_rx_ring(adapter);

	if (debug_area)
		ena_dump_stats_to_buf(adapter, debug_area);

	if (host_info)
		ena_update_host_info(host_info, adapter->netdev);

	if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
		netif_err(adapter, drv, adapter->netdev,
			  "Trigger reset is on\n");
		ena_dump_stats_to_dmesg(adapter);
		queue_work(ena_wq, &adapter->reset_task);
		return;
	}

	/* Reset the timer */
	mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
}
static u32 ena_calc_max_io_queue_num(struct pci_dev *pdev,
				     struct ena_com_dev *ena_dev,
				     struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	u32 io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues;

	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
			&get_feat_ctx->max_queue_ext.max_queue_ext;
		io_rx_num = min_t(u32, max_queue_ext->max_rx_sq_num,
				  max_queue_ext->max_rx_cq_num);

		io_tx_sq_num = max_queue_ext->max_tx_sq_num;
		io_tx_cq_num = max_queue_ext->max_tx_cq_num;
	} else {
		struct ena_admin_queue_feature_desc *max_queues =
			&get_feat_ctx->max_queues;
		io_tx_sq_num = max_queues->max_sq_num;
		io_tx_cq_num = max_queues->max_cq_num;
		io_rx_num = min_t(u32, io_tx_sq_num, io_tx_cq_num);
	}

	/* In case of LLQ use the llq fields for the tx SQ/CQ */
	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		io_tx_sq_num = get_feat_ctx->llq.max_llq_num;

	max_num_io_queues = min_t(u32, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES);
	max_num_io_queues = min_t(u32, max_num_io_queues, io_rx_num);
	max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_sq_num);
	max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_cq_num);
	/* 1 IRQ for mgmnt and 1 IRQ for each IO direction */
	max_num_io_queues = min_t(u32, max_num_io_queues, pci_msix_vec_count(pdev) - 1);

	return max_num_io_queues;
}
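/* For example, on a 16-vCPU instance whose device exposes 32 RX/TX queue
 * pairs and 33 MSI-X vectors this evaluates to
 * min(16, 32, 32, 32, 33 - 1) == 16 I/O queues, one per online CPU.
 */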
static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat,
				 struct net_device *netdev)
{
	netdev_features_t dev_features = 0;

	/* Set offload features */
	if (feat->offload.tx &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
		dev_features |= NETIF_F_IP_CSUM;

	if (feat->offload.tx &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)
		dev_features |= NETIF_F_IPV6_CSUM;

	if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
		dev_features |= NETIF_F_TSO;

	if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK)
		dev_features |= NETIF_F_TSO6;

	if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK)
		dev_features |= NETIF_F_TSO_ECN;

	if (feat->offload.rx_supported &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
		dev_features |= NETIF_F_RXCSUM;

	if (feat->offload.rx_supported &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK)
		dev_features |= NETIF_F_RXCSUM;

	netdev->features =
		dev_features |
		NETIF_F_SG |
		NETIF_F_RXHASH |
		NETIF_F_HIGHDMA;

	netdev->hw_features |= netdev->features;
	netdev->vlan_features |= netdev->features;
}
static void ena_set_conf_feat_params(struct ena_adapter *adapter,
				     struct ena_com_dev_get_features_ctx *feat)
{
	struct net_device *netdev = adapter->netdev;

	/* Copy mac address */
	if (!is_valid_ether_addr(feat->dev_attr.mac_addr)) {
		eth_hw_addr_random(netdev);
		ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
	} else {
		ether_addr_copy(adapter->mac_addr, feat->dev_attr.mac_addr);
		eth_hw_addr_set(netdev, adapter->mac_addr);
	}

	/* Set offload features */
	ena_set_dev_offloads(feat, netdev);

	adapter->max_mtu = feat->dev_attr.max_mtu;
	netdev->max_mtu = adapter->max_mtu;
	netdev->min_mtu = ENA_MIN_MTU;
}
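/* Build a default RSS configuration: an indirection table spread round-robin
 * over the IO RX queues, a Toeplitz hash function and the default hash
 * control. -EOPNOTSUPP is tolerated so devices lacking one of these
 * capabilities can still be brought up.
 */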
static int ena_rss_init_default(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	struct device *dev = &adapter->pdev->dev;
	int rc, i;
	u32 val;

	rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
	if (unlikely(rc)) {
		dev_err(dev, "Cannot init indirect table\n");
		goto err_rss_init;
	}

	for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
		val = ethtool_rxfh_indir_default(i, adapter->num_io_queues);
		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
						       ENA_IO_RXQ_IDX(val));
		if (unlikely(rc)) {
			dev_err(dev, "Cannot fill indirect table\n");
			goto err_fill_indir;
		}
	}

	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ, NULL,
					ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
	if (unlikely(rc && (rc != -EOPNOTSUPP))) {
		dev_err(dev, "Cannot fill hash function\n");
		goto err_fill_indir;
	}

	rc = ena_com_set_default_hash_ctrl(ena_dev);
	if (unlikely(rc && (rc != -EOPNOTSUPP))) {
		dev_err(dev, "Cannot fill hash control\n");
		goto err_fill_indir;
	}

	return 0;

err_fill_indir:
	ena_com_rss_destroy(ena_dev);
err_rss_init:
	return rc;
}
static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
{
	int release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;

	pci_release_selected_regions(pdev, release_bars);
}
/* ena_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ena_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ena_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 */
static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct ena_com_dev_get_features_ctx get_feat_ctx;
	struct ena_com_dev *ena_dev = NULL;
	struct ena_adapter *adapter;
	struct net_device *netdev;
	static int adapters_found;
	u32 max_num_io_queues;
	bool wd_state;
	int bars, rc;

	dev_dbg(&pdev->dev, "%s\n", __func__);

	rc = pci_enable_device_mem(pdev);
	if (rc) {
		dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n");
		return rc;
	}

	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(ENA_MAX_PHYS_ADDR_SIZE_BITS));
	if (rc) {
		dev_err(&pdev->dev, "dma_set_mask_and_coherent failed %d\n", rc);
		goto err_disable_device;
	}

	pci_set_master(pdev);

	ena_dev = vzalloc(sizeof(*ena_dev));
	if (!ena_dev) {
		rc = -ENOMEM;
		goto err_disable_device;
	}

	bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
	rc = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
	if (rc) {
		dev_err(&pdev->dev, "pci_request_selected_regions failed %d\n",
			rc);
		goto err_free_ena_dev;
	}

	ena_dev->reg_bar = devm_ioremap(&pdev->dev,
					pci_resource_start(pdev, ENA_REG_BAR),
					pci_resource_len(pdev, ENA_REG_BAR));
	if (!ena_dev->reg_bar) {
		dev_err(&pdev->dev, "Failed to remap regs bar\n");
		rc = -EFAULT;
		goto err_free_region;
	}

	ena_dev->ena_min_poll_delay_us = ENA_ADMIN_POLL_DELAY_US;

	ena_dev->dmadev = &pdev->dev;

	netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), ENA_MAX_RINGS);
	if (!netdev) {
		dev_err(&pdev->dev, "alloc_etherdev_mq failed\n");
		rc = -ENOMEM;
		goto err_free_region;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);
	adapter = netdev_priv(netdev);
	adapter->ena_dev = ena_dev;
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->msg_enable = DEFAULT_MSG_ENABLE;

	ena_dev->net_device = netdev;

	pci_set_drvdata(pdev, adapter);

	rc = ena_map_llq_mem_bar(pdev, ena_dev, bars);
	if (rc) {
		dev_err(&pdev->dev, "ENA LLQ bar mapping failed\n");
		goto err_netdev_destroy;
	}

	rc = ena_device_init(adapter, pdev, &get_feat_ctx, &wd_state);
	if (rc) {
		dev_err(&pdev->dev, "ENA device init failed\n");
		if (rc == -ETIME)
			rc = -EPROBE_DEFER;
		goto err_netdev_destroy;
	}

	/* Initial TX and RX interrupt delay. Assumes 1 usec granularity.
	 * Updated during device initialization with the real granularity
	 */
	ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS;
	ena_dev->intr_moder_rx_interval = ENA_INTR_INITIAL_RX_INTERVAL_USECS;
	ena_dev->intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
	max_num_io_queues = ena_calc_max_io_queue_num(pdev, ena_dev, &get_feat_ctx);
	if (unlikely(!max_num_io_queues)) {
		rc = -EFAULT;
		goto err_device_destroy;
	}

	ena_set_conf_feat_params(adapter, &get_feat_ctx);

	adapter->reset_reason = ENA_REGS_RESET_NORMAL;

	adapter->num_io_queues = max_num_io_queues;
	adapter->max_num_io_queues = max_num_io_queues;
	adapter->last_monitored_tx_qid = 0;

	adapter->xdp_first_ring = 0;
	adapter->xdp_num_queues = 0;

	adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK;
	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		adapter->disable_meta_caching =
			!!(get_feat_ctx.llq.accel_mode.u.get.supported_flags &
			   BIT(ENA_ADMIN_DISABLE_META_CACHING));

	adapter->wd_state = wd_state;

	snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", adapters_found);

	rc = ena_com_init_interrupt_moderation(adapter->ena_dev);
	if (rc) {
		dev_err(&pdev->dev,
			"Failed to query interrupt moderation feature\n");
		goto err_device_destroy;
	}

	ena_init_io_rings(adapter,
			  0,
			  adapter->xdp_num_queues +
			  adapter->num_io_queues);

	netdev->netdev_ops = &ena_netdev_ops;
	netdev->watchdog_timeo = TX_TIMEOUT;
	ena_set_ethtool_ops(netdev);

	netdev->priv_flags |= IFF_UNICAST_FLT;

	u64_stats_init(&adapter->syncp);

	rc = ena_enable_msix_and_set_admin_interrupts(adapter);
	if (rc) {
		dev_err(&pdev->dev,
			"Failed to enable and set the admin interrupts\n");
		goto err_worker_destroy;
	}
	rc = ena_rss_init_default(adapter);
	if (rc && (rc != -EOPNOTSUPP)) {
		dev_err(&pdev->dev, "Cannot init RSS rc: %d\n", rc);
		goto err_free_msix;
	}

	ena_config_debug_area(adapter);

	if (ena_xdp_legal_queue_count(adapter, adapter->num_io_queues))
		netdev->xdp_features = NETDEV_XDP_ACT_BASIC |
				       NETDEV_XDP_ACT_REDIRECT;

	memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len);

	netif_carrier_off(netdev);

	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto err_rss;
	}

	INIT_WORK(&adapter->reset_task, ena_fw_reset_device);

	adapter->last_keep_alive_jiffies = jiffies;
	adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT;
	adapter->missing_tx_completion_to = TX_TIMEOUT;
	adapter->missing_tx_completion_threshold = MAX_NUM_OF_TIMEOUTED_PACKETS;

	ena_update_hints(adapter, &get_feat_ctx.hw_hints);

	timer_setup(&adapter->timer_service, ena_timer_service, 0);
	mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));

	dev_info(&pdev->dev,
		 "%s found at mem %lx, mac addr %pM\n",
		 DEVICE_NAME, (long)pci_resource_start(pdev, 0),
		 netdev->dev_addr);

	set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);

	adapters_found++;

	return 0;

err_rss:
	ena_com_delete_debug_area(ena_dev);
	ena_com_rss_destroy(ena_dev);
err_free_msix:
	ena_com_dev_reset(ena_dev, ENA_REGS_RESET_INIT_ERR);
	/* stop submitting admin commands on a device that was reset */
	ena_com_set_admin_running_state(ena_dev, false);
	ena_free_mgmnt_irq(adapter);
	ena_disable_msix(adapter);
err_worker_destroy:
	del_timer(&adapter->timer_service);
err_device_destroy:
	ena_com_delete_host_info(ena_dev);
	ena_com_admin_destroy(ena_dev);
err_netdev_destroy:
	free_netdev(netdev);
err_free_region:
	ena_release_bars(ena_dev, pdev);
err_free_ena_dev:
	vfree(ena_dev);
err_disable_device:
	pci_disable_device(pdev);
	return rc;
}
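/* The error labels above unwind in reverse order of acquisition, so a
 * failure at any stage of ena_probe releases exactly the resources that
 * were already acquired.
 */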
/*****************************************************************************/

/* __ena_shutoff - Helper used in both PCI remove/shutdown routines
 * @pdev: PCI device information struct
 * @shutdown: Is it a shutdown operation? If false, it is a removal
 *
 * __ena_shutoff is a helper routine that does the real work on shutdown and
 * removal paths; the difference between those paths is whether to detach or
 * unregister the netdevice.
 */
static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
{
	struct ena_adapter *adapter = pci_get_drvdata(pdev);
	struct ena_com_dev *ena_dev;
	struct net_device *netdev;

	ena_dev = adapter->ena_dev;
	netdev = adapter->netdev;

#ifdef CONFIG_RFS_ACCEL
	if ((adapter->msix_vecs >= 1) && (netdev->rx_cpu_rmap)) {
		free_irq_cpu_rmap(netdev->rx_cpu_rmap);
		netdev->rx_cpu_rmap = NULL;
	}
#endif /* CONFIG_RFS_ACCEL */

	/* Make sure timer and reset routine won't be called after
	 * freeing device resources.
	 */
	del_timer_sync(&adapter->timer_service);
	cancel_work_sync(&adapter->reset_task);

	rtnl_lock(); /* lock released inside the below if-else block */
	adapter->reset_reason = ENA_REGS_RESET_SHUTDOWN;
	ena_destroy_device(adapter, true);

	if (shutdown) {
		netif_device_detach(netdev);
		dev_close(netdev);
		rtnl_unlock();
	} else {
		rtnl_unlock();
		unregister_netdev(netdev);
		free_netdev(netdev);
	}

	ena_com_rss_destroy(ena_dev);

	ena_com_delete_debug_area(ena_dev);

	ena_com_delete_host_info(ena_dev);

	ena_release_bars(ena_dev, pdev);

	pci_disable_device(pdev);
}
/* ena_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ena_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void ena_remove(struct pci_dev *pdev)
{
	__ena_shutoff(pdev, false);
}
/* ena_shutdown - Device Shutdown Routine
 * @pdev: PCI device information struct
 *
 * ena_shutdown is called by the PCI subsystem to alert the driver that
 * a shutdown/reboot (or kexec) is happening and device must be disabled.
 */
static void ena_shutdown(struct pci_dev *pdev)
{
	__ena_shutoff(pdev, true);
}
/* ena_suspend - PM suspend callback
 * @dev_d: Device information struct
 */
static int __maybe_unused ena_suspend(struct device *dev_d)
{
	struct pci_dev *pdev = to_pci_dev(dev_d);
	struct ena_adapter *adapter = pci_get_drvdata(pdev);

	ena_increase_stat(&adapter->dev_stats.suspend, 1, &adapter->syncp);

	rtnl_lock();
	if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
		dev_err(&pdev->dev,
			"Ignoring device reset request as the device is being suspended\n");
		clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
	}
	ena_destroy_device(adapter, true);
	rtnl_unlock();
	return 0;
}
/* ena_resume - PM resume callback
 * @dev_d: Device information struct
 */
static int __maybe_unused ena_resume(struct device *dev_d)
{
	struct ena_adapter *adapter = dev_get_drvdata(dev_d);
	int rc;

	ena_increase_stat(&adapter->dev_stats.resume, 1, &adapter->syncp);

	rtnl_lock();
	rc = ena_restore_device(adapter);
	rtnl_unlock();
	return rc;
}

static SIMPLE_DEV_PM_OPS(ena_pm_ops, ena_suspend, ena_resume);
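/* SIMPLE_DEV_PM_OPS only references the callbacks when CONFIG_PM is set,
 * which is why ena_suspend()/ena_resume() are marked __maybe_unused.
 */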
static struct pci_driver ena_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= ena_pci_tbl,
	.probe		= ena_probe,
	.remove		= ena_remove,
	.shutdown	= ena_shutdown,
	.driver.pm	= &ena_pm_ops,
	.sriov_configure = pci_sriov_configure_simple,
};
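/* ena_pci_driver ties the routines above into the PCI core: probe/remove
 * for device binding, shutdown for reboot/kexec, the PM ops for
 * suspend/resume, and pci_sriov_configure_simple for plain VF
 * enable/disable.
 */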
static int __init ena_init(void)
{
	int ret;

	ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME);
	if (!ena_wq) {
		pr_err("Failed to create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&ena_pci_driver);
	if (ret)
		destroy_workqueue(ena_wq);

	return ret;
}

static void __exit ena_cleanup(void)
{
	pci_unregister_driver(&ena_pci_driver);

	destroy_workqueue(ena_wq);
}
/******************************************************************************
 ******************************** AENQ Handlers *******************************
 *****************************************************************************/
/* ena_update_on_link_change:
 * Notify the network interface about the change in link status
 */
static void ena_update_on_link_change(void *adapter_data,
				      struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
	struct ena_admin_aenq_link_change_desc *aenq_desc =
		(struct ena_admin_aenq_link_change_desc *)aenq_e;
	int status = aenq_desc->flags &
		ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;

	if (status) {
		netif_dbg(adapter, ifup, adapter->netdev, "%s\n", __func__);
		set_bit(ENA_FLAG_LINK_UP, &adapter->flags);
		if (!test_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags))
			netif_carrier_on(adapter->netdev);
	} else {
		clear_bit(ENA_FLAG_LINK_UP, &adapter->flags);
		netif_carrier_off(adapter->netdev);
	}
}
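/* Note that the carrier is left off while ENA_FLAG_ONGOING_RESET is set;
 * the link-up event is still latched in ENA_FLAG_LINK_UP so the carrier
 * can be restored once the reset completes.
 */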
static void ena_keep_alive_wd(void *adapter_data,
			      struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
	struct ena_admin_aenq_keep_alive_desc *desc;
	u64 rx_drops;
	u64 tx_drops;

	desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
	adapter->last_keep_alive_jiffies = jiffies;

	rx_drops = ((u64)desc->rx_drops_high << 32) | desc->rx_drops_low;
	tx_drops = ((u64)desc->tx_drops_high << 32) | desc->tx_drops_low;

	u64_stats_update_begin(&adapter->syncp);
	/* These stats are accumulated by the device, so the counters indicate
	 * all drops since last reset.
	 */
	adapter->dev_stats.rx_drops = rx_drops;
	adapter->dev_stats.tx_drops = tx_drops;
	u64_stats_update_end(&adapter->syncp);
}
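/* The device reports the accumulated drop counters as 32-bit high/low
 * halves, recombined into u64 values above. Each keep-alive event also
 * refreshes last_keep_alive_jiffies, which the driver's watchdog compares
 * against keep_alive_timeout to detect an unresponsive device.
 */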
static void ena_notification(void *adapter_data,
			     struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
	struct ena_admin_ena_hw_hints *hints;

	WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION,
	     "Invalid group(%x) expected %x\n",
	     aenq_e->aenq_common_desc.group,
	     ENA_ADMIN_NOTIFICATION);

	switch (aenq_e->aenq_common_desc.syndrome) {
	case ENA_ADMIN_UPDATE_HINTS:
		hints = (struct ena_admin_ena_hw_hints *)
			(&aenq_e->inline_data_w4);
		ena_update_hints(adapter, hints);
		break;
	default:
		netif_err(adapter, drv, adapter->netdev,
			  "Invalid aenq notification syndrome %d\n",
			  aenq_e->aenq_common_desc.syndrome);
	}
}
/* This handler will be called for an unknown event group or unimplemented handlers */
static void unimplemented_aenq_handler(void *data,
				       struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)data;

	netif_err(adapter, drv, adapter->netdev,
		  "Unknown event was received or event with unimplemented handler\n");
}
static struct ena_aenq_handlers aenq_handlers = {
	.handlers = {
		[ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
		[ENA_ADMIN_NOTIFICATION] = ena_notification,
		[ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd,
	},
	.unimplemented_handler = unimplemented_aenq_handler
};

module_init(ena_init);
module_exit(ena_cleanup);