// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. */

#include <linux/bpf_trace.h>
#include <linux/stringify.h>
#include <net/xdp_sock_drv.h>

#include "i40e.h"
#include "i40e_txrx_common.h"
#include "i40e_xsk.h"
void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring)
{
	memset(rx_ring->rx_bi_zc, 0,
	       sizeof(*rx_ring->rx_bi_zc) * rx_ring->count);
}
static struct xdp_buff **i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
{
	return &rx_ring->rx_bi_zc[idx];
}
/**
 * i40e_realloc_rx_xdp_bi - reallocate SW ring for either XSK or normal buffer
 * @rx_ring: Current rx ring
 * @pool_present: is pool for XSK present
 *
 * Tries to allocate memory and returns -ENOMEM if the allocation fails.
 * On success, the old SW ring is replaced with the newly allocated one.
 * Returns 0 on success, negative on failure
 */
static int i40e_realloc_rx_xdp_bi(struct i40e_ring *rx_ring, bool pool_present)
{
	size_t elem_size = pool_present ? sizeof(*rx_ring->rx_bi_zc) :
					  sizeof(*rx_ring->rx_bi);
	void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL);

	if (!sw_ring)
		return -ENOMEM;

	if (pool_present) {
		kfree(rx_ring->rx_bi);
		rx_ring->rx_bi = NULL;
		rx_ring->rx_bi_zc = sw_ring;
	} else {
		kfree(rx_ring->rx_bi_zc);
		rx_ring->rx_bi_zc = NULL;
		rx_ring->rx_bi = sw_ring;
	}
	return 0;
}
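/* Note: rx_bi and rx_bi_zc are alternative views of the same SW ring slot
 * array; only one is in use at a time, and the helper above swaps the
 * element type so the ring matches the active datapath (XSK zero-copy vs.
 * regular buffers).
 */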
/**
 * i40e_realloc_rx_bi_zc - reallocate rx SW rings
 * @vsi: Current VSI
 * @zc: is zero copy set
 *
 * Reallocate buffer for rx_rings that might be used by XSK.
 * XDP requires more memory than rx_buf provides.
 * Returns 0 on success, negative on failure
 */
int i40e_realloc_rx_bi_zc(struct i40e_vsi *vsi, bool zc)
{
	struct i40e_ring *rx_ring;
	unsigned long q;

	for_each_set_bit(q, vsi->af_xdp_zc_qps, vsi->alloc_queue_pairs) {
		rx_ring = vsi->rx_rings[q];
		if (i40e_realloc_rx_xdp_bi(rx_ring, zc))
			return -ENOMEM;
	}
	return 0;
}
/**
 * i40e_xsk_pool_enable - Enable/associate an AF_XDP buffer pool to a
 * certain ring/qid
 * @vsi: Current VSI
 * @pool: buffer pool
 * @qid: Rx ring to associate buffer pool with
 *
 * Returns 0 on success, <0 on failure
 */
static int i40e_xsk_pool_enable(struct i40e_vsi *vsi,
				struct xsk_buff_pool *pool,
				u16 qid)
{
	struct net_device *netdev = vsi->netdev;
	bool if_running;
	int err;

	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;

	if (qid >= vsi->num_queue_pairs)
		return -EINVAL;

	if (qid >= netdev->real_num_rx_queues ||
	    qid >= netdev->real_num_tx_queues)
		return -EINVAL;

	err = xsk_pool_dma_map(pool, &vsi->back->pdev->dev, I40E_RX_DMA_ATTR);
	if (err)
		return err;

	set_bit(qid, vsi->af_xdp_zc_qps);

	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);

	if (if_running) {
		err = i40e_queue_pair_disable(vsi, qid);
		if (err)
			return err;

		err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], true);
		if (err)
			return err;

		err = i40e_queue_pair_enable(vsi, qid);
		if (err)
			return err;

		/* Kick start the NAPI context so that receiving will start */
		err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX);
		if (err)
			return err;
	}

	return 0;
}
/**
 * i40e_xsk_pool_disable - Disassociate an AF_XDP buffer pool from a
 * certain ring/qid
 * @vsi: Current VSI
 * @qid: Rx ring to disassociate buffer pool from
 *
 * Returns 0 on success, <0 on failure
 */
static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid)
{
	struct net_device *netdev = vsi->netdev;
	struct xsk_buff_pool *pool;
	bool if_running;
	int err;

	pool = xsk_get_pool_from_qid(netdev, qid);
	if (!pool)
		return -EINVAL;

	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);

	if (if_running) {
		err = i40e_queue_pair_disable(vsi, qid);
		if (err)
			return err;
	}

	clear_bit(qid, vsi->af_xdp_zc_qps);
	xsk_pool_dma_unmap(pool, I40E_RX_DMA_ATTR);

	if (if_running) {
		err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], false);
		if (err)
			return err;
		err = i40e_queue_pair_enable(vsi, qid);
		if (err)
			return err;
	}

	return 0;
}
/**
 * i40e_xsk_pool_setup - Enable/disassociate an AF_XDP buffer pool to/from
 * a ring/qid
 * @vsi: Current VSI
 * @pool: Buffer pool to enable/associate to a ring, or NULL to disable
 * @qid: Rx ring to (dis)associate buffer pool to/from
 *
 * This function enables or disables a buffer pool on a certain ring.
 *
 * Returns 0 on success, <0 on failure
 */
int i40e_xsk_pool_setup(struct i40e_vsi *vsi, struct xsk_buff_pool *pool,
			u16 qid)
{
	return pool ? i40e_xsk_pool_enable(vsi, pool, qid) :
		      i40e_xsk_pool_disable(vsi, qid);
}
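/* Note: this setup path is driven by the core XDP code when an AF_XDP
 * socket binds to (or unbinds from) a queue; a NULL @pool is the disable
 * request. The enable path deliberately bounces the queue pair so that
 * the SW ring can be swapped while the queue is quiesced.
 */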
/**
 * i40e_run_xdp_zc - Executes an XDP program on an xdp_buff
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 * @xdp_prog: XDP program to run
 *
 * Returns any of I40E_XDP_{PASS, CONSUMED, TX, REDIR}
 */
static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp,
			   struct bpf_prog *xdp_prog)
{
	int err, result = I40E_XDP_PASS;
	struct i40e_ring *xdp_ring;
	u32 act;

	act = bpf_prog_run_xdp(xdp_prog, xdp);

	if (likely(act == XDP_REDIRECT)) {
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		if (!err)
			return I40E_XDP_REDIR;
		if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
			result = I40E_XDP_EXIT;
		else
			result = I40E_XDP_CONSUMED;
		goto out_failure;
	}

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
		result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
		if (result == I40E_XDP_CONSUMED)
			goto out_failure;
		break;
	case XDP_DROP:
		result = I40E_XDP_CONSUMED;
		break;
	default:
		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
		result = I40E_XDP_CONSUMED;
out_failure:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
	}
	return result;
}
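/* Note on verdicts: I40E_XDP_EXIT is only produced when the AF_XDP socket
 * runs in need_wakeup mode and the redirect failed with -ENOBUFS (the XSK
 * Rx ring is full); the Rx cleanup loop then stops early instead of
 * dropping further frames.
 */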
/**
 * i40e_alloc_rx_buffers_zc - Allocates a number of Rx buffers
 * @rx_ring: Rx ring
 * @count: The number of buffers to allocate
 *
 * Returns true for a successful allocation, false otherwise
 */
bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
{
	u16 ntu = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct xdp_buff **xdp;
	u32 nb_buffs, i;
	dma_addr_t dma;

	rx_desc = I40E_RX_DESC(rx_ring, ntu);
	xdp = i40e_rx_bi(rx_ring, ntu);

	nb_buffs = min_t(u16, count, rx_ring->count - ntu);
	nb_buffs = xsk_buff_alloc_batch(rx_ring->xsk_pool, xdp, nb_buffs);
	if (!nb_buffs)
		return false;

	i = nb_buffs;
	while (i--) {
		dma = xsk_buff_xdp_get_dma(*xdp);
		rx_desc->read.pkt_addr = cpu_to_le64(dma);
		rx_desc->read.hdr_addr = 0;

		rx_desc++;
		xdp++;
	}

	ntu += nb_buffs;
	if (ntu == rx_ring->count) {
		rx_desc = I40E_RX_DESC(rx_ring, 0);
		ntu = 0;
	}

	/* clear the status bits for the next_to_use descriptor */
	rx_desc->wb.qword1.status_error_len = 0;
	i40e_release_rx_desc(rx_ring, ntu);

	return count == nb_buffs;
}
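/* Note: the allocation above is done as a single contiguous batch, so at
 * most rx_ring->count - ntu buffers are filled per call (the wrap to index
 * 0 is left for the next invocation). Returning false (count != nb_buffs)
 * tells the caller that the fill ring could not satisfy the full request.
 */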
/**
 * i40e_construct_skb_zc - Create skbuff from zero-copy Rx buffer
 * @rx_ring: Rx ring
 * @xdp: xdp_buff
 *
 * This function allocates a new skb from a zero-copy Rx buffer.
 *
 * Returns the skb, or NULL on failure.
 */
static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
					     struct xdp_buff *xdp)
{
	unsigned int totalsize = xdp->data_end - xdp->data_meta;
	unsigned int metasize = xdp->data - xdp->data_meta;
	struct skb_shared_info *sinfo = NULL;
	struct sk_buff *skb;
	u32 nr_frags = 0;

	if (unlikely(xdp_buff_has_frags(xdp))) {
		sinfo = xdp_get_shared_info_from_buff(xdp);
		nr_frags = sinfo->nr_frags;
	}
	net_prefetch(xdp->data_meta);

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		goto out;

	memcpy(__skb_put(skb, totalsize), xdp->data_meta,
	       ALIGN(totalsize, sizeof(long)));

	if (metasize) {
		skb_metadata_set(skb, metasize);
		__skb_pull(skb, metasize);
	}

	if (likely(!xdp_buff_has_frags(xdp)))
		goto out;

	for (int i = 0; i < nr_frags; i++) {
		struct skb_shared_info *skinfo = skb_shinfo(skb);
		skb_frag_t *frag = &sinfo->frags[i];
		struct page *page;
		void *addr;

		page = dev_alloc_page();
		if (!page) {
			dev_kfree_skb(skb);
			return NULL;
		}
		addr = page_to_virt(page);

		/* copy the frag payload (not the struct page itself) into
		 * the newly allocated page
		 */
		memcpy(addr, skb_frag_address(frag), skb_frag_size(frag));

		__skb_fill_page_desc_noacc(skinfo, skinfo->nr_frags++,
					   addr, 0, skb_frag_size(frag));
	}

out:
	xsk_buff_free(xdp);
	return skb;
}
static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
				      struct xdp_buff *xdp_buff,
				      union i40e_rx_desc *rx_desc,
				      unsigned int *rx_packets,
				      unsigned int *rx_bytes,
				      unsigned int xdp_res,
				      bool *failure)
{
	struct sk_buff *skb;

	*rx_packets = 1;
	*rx_bytes = xdp_get_buff_len(xdp_buff);

	if (likely(xdp_res == I40E_XDP_REDIR) || xdp_res == I40E_XDP_TX)
		return;

	if (xdp_res == I40E_XDP_EXIT) {
		*failure = true;
		return;
	}

	if (xdp_res == I40E_XDP_CONSUMED) {
		xsk_buff_free(xdp_buff);
		return;
	}
	if (xdp_res == I40E_XDP_PASS) {
		/* NB! We are not checking for errors using
		 * i40e_test_staterr with
		 * BIT(I40E_RXD_QW1_ERROR_SHIFT). This is due to that
		 * SBP is *not* set in PRT_SBPVSI (default not set).
		 */
		skb = i40e_construct_skb_zc(rx_ring, xdp_buff);
		if (!skb) {
			rx_ring->rx_stats.alloc_buff_failed++;
			*rx_packets = 0;
			*rx_bytes = 0;
			return;
		}

		if (eth_skb_pad(skb)) {
			*rx_packets = 0;
			*rx_bytes = 0;
			return;
		}

		i40e_process_skb_fields(rx_ring, rx_desc, skb);
		napi_gro_receive(&rx_ring->q_vector->napi, skb);
		return;
	}

	/* Should never get here, as all valid cases have been handled already.
	 */
	WARN_ON_ONCE(1);
}
static int
i40e_add_xsk_frag(struct i40e_ring *rx_ring, struct xdp_buff *first,
		  struct xdp_buff *xdp, const unsigned int size)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(first);

	if (!xdp_buff_has_frags(first)) {
		sinfo->nr_frags = 0;
		sinfo->xdp_frags_size = 0;
		xdp_buff_set_frags_flag(first);
	}

	if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
		xsk_buff_free(first);
		return -ENOMEM;
	}

	__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
				   virt_to_page(xdp->data_hard_start), 0, size);
	sinfo->xdp_frags_size += size;
	xsk_buff_add_frag(xdp);

	return 0;
}
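/* Note: for multi-buffer frames each non-first buffer is attached to the
 * first xdp_buff as a page frag; xdp_frags_size tracks the payload bytes
 * held in frags so that xdp_get_buff_len() can report the full frame
 * length.
 */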
/**
 * i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring
 * @rx_ring: Rx ring
 * @budget: NAPI budget
 *
 * Returns amount of work completed
 */
int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 next_to_process = rx_ring->next_to_process;
	u16 next_to_clean = rx_ring->next_to_clean;
	u16 count_mask = rx_ring->count - 1;
	unsigned int xdp_res, xdp_xmit = 0;
	struct xdp_buff *first = NULL;
	struct bpf_prog *xdp_prog;
	bool failure = false;
	u16 cleaned_count;

	if (next_to_process != next_to_clean)
		first = *i40e_rx_bi(rx_ring, next_to_clean);

	/* NB! xdp_prog will always be !NULL, due to the fact that
	 * this path is enabled by setting an XDP program.
	 */
	xdp_prog = READ_ONCE(rx_ring->xdp_prog);

	while (likely(total_rx_packets < (unsigned int)budget)) {
		union i40e_rx_desc *rx_desc;
		unsigned int rx_packets;
		unsigned int rx_bytes;
		struct xdp_buff *bi;
		unsigned int size;
		u64 qword;

		rx_desc = I40E_RX_DESC(rx_ring, next_to_process);
		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we have
		 * verified the descriptor has been written back.
		 */
		dma_rmb();

		if (i40e_rx_is_programming_status(qword)) {
			i40e_clean_programming_status(rx_ring,
						      rx_desc->raw.qword[0],
						      qword);
			bi = *i40e_rx_bi(rx_ring, next_to_process);
			xsk_buff_free(bi);
			next_to_process = (next_to_process + 1) & count_mask;
			continue;
		}

		size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
		       I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
		if (!size)
			break;

		bi = *i40e_rx_bi(rx_ring, next_to_process);
		xsk_buff_set_size(bi, size);
		xsk_buff_dma_sync_for_cpu(bi, rx_ring->xsk_pool);

		if (!first)
			first = bi;
		else if (i40e_add_xsk_frag(rx_ring, first, bi, size))
			break;

		next_to_process = (next_to_process + 1) & count_mask;

		if (i40e_is_non_eop(rx_ring, rx_desc))
			continue;

		xdp_res = i40e_run_xdp_zc(rx_ring, first, xdp_prog);
		i40e_handle_xdp_result_zc(rx_ring, first, rx_desc, &rx_packets,
					  &rx_bytes, xdp_res, &failure);
		next_to_clean = next_to_process;
		if (failure)
			break;
		total_rx_packets += rx_packets;
		total_rx_bytes += rx_bytes;
		xdp_xmit |= xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR);
		first = NULL;
	}

	rx_ring->next_to_clean = next_to_clean;
	rx_ring->next_to_process = next_to_process;
	cleaned_count = (next_to_clean - rx_ring->next_to_use - 1) & count_mask;

	if (cleaned_count >= I40E_RX_BUFFER_WRITE)
		failure |= !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count);

	i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
	i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);

	if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
		if (failure || next_to_clean == rx_ring->next_to_use)
			xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
		else
			xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);

		return (int)total_rx_packets;
	}
	return failure ? budget : (int)total_rx_packets;
}
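/* Note on the need_wakeup protocol: when the socket uses need_wakeup, the
 * driver sets the Rx wakeup flag if it ran dry (allocation failure or an
 * empty ring) so that user space knows it must kick the kernel (e.g. via
 * poll() on the AF_XDP socket); otherwise the flag is cleared and user
 * space can keep busy-polling the rings without syscalls.
 */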
static void i40e_xmit_pkt(struct i40e_ring *xdp_ring, struct xdp_desc *desc,
			  unsigned int *total_bytes)
{
	u32 cmd = I40E_TX_DESC_CMD_ICRC | xsk_is_eop_desc(desc);
	struct i40e_tx_desc *tx_desc;
	dma_addr_t dma;

	dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr);
	xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc->len);

	tx_desc = I40E_TX_DESC(xdp_ring, xdp_ring->next_to_use++);
	tx_desc->buffer_addr = cpu_to_le64(dma);
	tx_desc->cmd_type_offset_bsz = build_ctob(cmd, 0, desc->len, 0);

	*total_bytes += desc->len;
}
static void i40e_xmit_pkt_batch(struct i40e_ring *xdp_ring,
				struct xdp_desc *desc,
				unsigned int *total_bytes)
{
	u16 ntu = xdp_ring->next_to_use;
	struct i40e_tx_desc *tx_desc;
	dma_addr_t dma;
	u32 i;

	loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) {
		u32 cmd = I40E_TX_DESC_CMD_ICRC | xsk_is_eop_desc(&desc[i]);

		dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc[i].addr);
		xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma,
						 desc[i].len);

		tx_desc = I40E_TX_DESC(xdp_ring, ntu++);
		tx_desc->buffer_addr = cpu_to_le64(dma);
		tx_desc->cmd_type_offset_bsz = build_ctob(cmd, 0,
							  desc[i].len, 0);

		*total_bytes += desc[i].len;
	}

	xdp_ring->next_to_use = ntu;
}
static void i40e_fill_tx_hw_ring(struct i40e_ring *xdp_ring,
				 struct xdp_desc *descs, u32 nb_pkts,
				 unsigned int *total_bytes)
{
	u32 batched, leftover, i;

	batched = nb_pkts & ~(PKTS_PER_BATCH - 1);
	leftover = nb_pkts & (PKTS_PER_BATCH - 1);
	for (i = 0; i < batched; i += PKTS_PER_BATCH)
		i40e_xmit_pkt_batch(xdp_ring, &descs[i], total_bytes);
	for (i = batched; i < batched + leftover; i++)
		i40e_xmit_pkt(xdp_ring, &descs[i], total_bytes);
}
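/* Note: the split above relies on PKTS_PER_BATCH being a power of two.
 * E.g. with PKTS_PER_BATCH == 4 and nb_pkts == 10, batched == 8 packets go
 * through the unrolled batch path and the remaining leftover == 2 packets
 * go through the single-packet path.
 */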
static void i40e_set_rs_bit(struct i40e_ring *xdp_ring)
{
	u16 ntu = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 :
					  xdp_ring->count - 1;
	struct i40e_tx_desc *tx_desc;

	tx_desc = I40E_TX_DESC(xdp_ring, ntu);
	tx_desc->cmd_type_offset_bsz |= cpu_to_le64(I40E_TX_DESC_CMD_RS <<
						    I40E_TXD_QW1_CMD_SHIFT);
}
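/* Note: the RS (report status) bit is set only on the last descriptor of
 * the batch, so the HW reports completion (via the head writeback consumed
 * in i40e_clean_xdp_tx_irq()) once per burst rather than per descriptor.
 */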
/**
 * i40e_xmit_zc - Performs zero-copy Tx AF_XDP
 * @xdp_ring: XDP Tx ring
 * @budget: NAPI budget
 *
 * Returns true if the work is finished.
 */
static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
{
	struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs;
	u32 nb_pkts, nb_processed = 0;
	unsigned int total_bytes = 0;

	nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget);
	if (!nb_pkts)
		return true;

	if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
		nb_processed = xdp_ring->count - xdp_ring->next_to_use;
		i40e_fill_tx_hw_ring(xdp_ring, descs, nb_processed,
				     &total_bytes);
		xdp_ring->next_to_use = 0;
	}

	i40e_fill_tx_hw_ring(xdp_ring, &descs[nb_processed],
			     nb_pkts - nb_processed, &total_bytes);

	/* Request an interrupt for the last frame and bump tail ptr. */
	i40e_set_rs_bit(xdp_ring);
	i40e_xdp_ring_update_tail(xdp_ring);

	i40e_update_tx_stats(xdp_ring, nb_pkts, total_bytes);

	return nb_pkts < budget;
}
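/* Note: when the descriptor burst would cross the end of the ring, it is
 * filled in two passes (up to the ring end, then from index 0). Returning
 * nb_pkts < budget tells the caller that Tx work is exhausted for this
 * NAPI cycle; a full budget's worth means more descriptors may be pending.
 */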
/**
 * i40e_clean_xdp_tx_buffer - Frees and unmaps an XDP Tx entry
 * @tx_ring: XDP Tx ring
 * @tx_bi: Tx buffer info to clean
 */
static void i40e_clean_xdp_tx_buffer(struct i40e_ring *tx_ring,
				     struct i40e_tx_buffer *tx_bi)
{
	xdp_return_frame(tx_bi->xdpf);
	tx_ring->xdp_tx_active--;
	dma_unmap_single(tx_ring->dev,
			 dma_unmap_addr(tx_bi, dma),
			 dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
	dma_unmap_len_set(tx_bi, len, 0);
}
/**
 * i40e_clean_xdp_tx_irq - Completes AF_XDP entries, and cleans XDP entries
 * @vsi: Current VSI
 * @tx_ring: XDP Tx ring
 *
 * Returns true if cleanup/transmission is done.
 */
bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring)
{
	struct xsk_buff_pool *bp = tx_ring->xsk_pool;
	u32 i, completed_frames, xsk_frames = 0;
	u32 head_idx = i40e_get_head(tx_ring);
	struct i40e_tx_buffer *tx_bi;
	unsigned int ntc;

	if (head_idx < tx_ring->next_to_clean)
		head_idx += tx_ring->count;
	completed_frames = head_idx - tx_ring->next_to_clean;

	if (completed_frames == 0)
		goto out_xmit;

	if (likely(!tx_ring->xdp_tx_active)) {
		xsk_frames = completed_frames;
		goto skip;
	}

	ntc = tx_ring->next_to_clean;

	for (i = 0; i < completed_frames; i++) {
		tx_bi = &tx_ring->tx_bi[ntc];

		if (tx_bi->xdpf) {
			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
			tx_bi->xdpf = NULL;
		} else {
			xsk_frames++;
		}

		if (++ntc >= tx_ring->count)
			ntc = 0;
	}

skip:
	tx_ring->next_to_clean += completed_frames;
	if (unlikely(tx_ring->next_to_clean >= tx_ring->count))
		tx_ring->next_to_clean -= tx_ring->count;

	if (xsk_frames)
		xsk_tx_completed(bp, xsk_frames);

	i40e_arm_wb(tx_ring, vsi, completed_frames);

out_xmit:
	if (xsk_uses_need_wakeup(tx_ring->xsk_pool))
		xsk_set_tx_need_wakeup(tx_ring->xsk_pool);

	return i40e_xmit_zc(tx_ring, I40E_DESC_UNUSED(tx_ring));
}
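/* Note: completion is derived from the head writeback (i40e_get_head())
 * rather than per-descriptor DD bits; the head index is unwrapped by
 * adding the ring size before subtracting next_to_clean. Entries with a
 * non-NULL xdpf came from the XDP_TX/XDP_REDIRECT path and must be
 * unmapped individually; everything else is an AF_XDP descriptor that is
 * completed back to the pool in bulk via xsk_tx_completed().
 */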
/**
 * i40e_xsk_wakeup - Implements the ndo_xsk_wakeup
 * @dev: the netdevice
 * @queue_id: queue id to wake up
 * @flags: ignored in our case since we have Rx and Tx in the same NAPI.
 *
 * Returns <0 for errors, 0 otherwise.
 */
int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *ring;

	if (test_bit(__I40E_CONFIG_BUSY, pf->state))
		return -EAGAIN;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return -ENETDOWN;

	if (!i40e_enabled_xdp_vsi(vsi))
		return -EINVAL;

	if (queue_id >= vsi->num_queue_pairs)
		return -EINVAL;

	if (!vsi->xdp_rings[queue_id]->xsk_pool)
		return -EINVAL;

	ring = vsi->xdp_rings[queue_id];

	/* The idea here is that if NAPI is running, mark a miss, so
	 * it will run again. If not, trigger an interrupt and
	 * schedule the NAPI from interrupt context. If NAPI would be
	 * scheduled here, the interrupt affinity would not be
	 * honored.
	 */
	if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi))
		i40e_force_wb(vsi, ring->q_vector);

	return 0;
}
void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
{
	u16 count_mask = rx_ring->count - 1;
	u16 ntc = rx_ring->next_to_clean;
	u16 ntu = rx_ring->next_to_use;

	for ( ; ntc != ntu; ntc = (ntc + 1) & count_mask) {
		struct xdp_buff *rx_bi = *i40e_rx_bi(rx_ring, ntc);

		xsk_buff_free(rx_bi);
	}
}
/**
 * i40e_xsk_clean_tx_ring - Clean the XDP Tx ring on shutdown
 * @tx_ring: XDP Tx ring
 */
void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
{
	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
	struct xsk_buff_pool *bp = tx_ring->xsk_pool;
	struct i40e_tx_buffer *tx_bi;
	u32 xsk_frames = 0;

	while (ntc != ntu) {
		tx_bi = &tx_ring->tx_bi[ntc];

		if (tx_bi->xdpf)
			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;

		ntc++;
		if (ntc >= tx_ring->count)
			ntc = 0;
	}

	if (xsk_frames)
		xsk_tx_completed(bp, xsk_frames);
}
/**
 * i40e_xsk_any_rx_ring_enabled - Checks if Rx rings have an AF_XDP
 * buffer pool attached
 * @vsi: Current VSI
 *
 * Returns true if any of the Rx rings has an AF_XDP buffer pool attached
 */
bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
{
	struct net_device *netdev = vsi->netdev;
	int i;

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		if (xsk_get_pool_from_qid(netdev, i))
			return true;
	}

	return false;
}