git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
eth: fbnic: Add support for XDP_TX action
author: Mohsin Bashir <mohsin.bashr@gmail.com>
Wed, 13 Aug 2025 22:13:17 +0000 (15:13 -0700)
committer: Paolo Abeni <pabeni@redhat.com>
Tue, 19 Aug 2025 08:51:16 +0000 (10:51 +0200)
Add support for XDP_TX action and cleaning the associated work.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Mohsin Bashir <mohsin.bashr@gmail.com>
Link: https://patch.msgid.link/20250813221319.3367670-8-mohsin.bashr@gmail.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
drivers/net/ethernet/meta/fbnic/fbnic_txrx.c

index 8c7709f707e68fa322986e93efcecba0dd40b23f..de3610a7491a82c4aa9bc1d0ae669d9d78d0ab18 100644 (file)
@@ -19,6 +19,7 @@
 enum {
        FBNIC_XDP_PASS = 0,
        FBNIC_XDP_CONSUME,
+       FBNIC_XDP_TX,
        FBNIC_XDP_LEN_ERR,
 };
 
@@ -1020,6 +1021,80 @@ static struct sk_buff *fbnic_build_skb(struct fbnic_napi_vector *nv,
        return skb;
 }
 
+/* Queue an XDP_TX packet on the XDP Tx submission ring (nv->qt[0].sub1).
+ *
+ * Writes one Type-AL work descriptor per segment (head buffer plus any
+ * xdp_buff frags) and tags the final descriptor LAST_AL.  Advances
+ * ring->tail but does NOT ring the doorbell; the caller batches that
+ * via fbnic_pkt_commit_tail() after the NAPI poll loop.
+ *
+ * Return: -FBNIC_XDP_TX on success, -FBNIC_XDP_CONSUME if the ring
+ * lacks a free descriptor per segment (packet is dropped by caller).
+ * Values are negated so they can travel through ERR_PTR().
+ */
+static long fbnic_pkt_tx(struct fbnic_napi_vector *nv,
+                        struct fbnic_pkt_buff *pkt)
+{
+       struct fbnic_ring *ring = &nv->qt[0].sub1;
+       int size, offset, nsegs = 1, data_len = 0;
+       unsigned int tail = ring->tail;
+       struct skb_shared_info *shinfo;
+       skb_frag_t *frag = NULL;
+       struct page *page;
+       dma_addr_t dma;
+       __le64 *twd;
+
+       /* Multi-buffer packet: account for every frag in addition to the
+        * head buffer, and remember how many payload bytes live in frags.
+        */
+       if (unlikely(xdp_buff_has_frags(&pkt->buff))) {
+               shinfo = xdp_get_shared_info_from_buff(&pkt->buff);
+               nsegs += shinfo->nr_frags;
+               data_len = shinfo->xdp_frags_size;
+               frag = &shinfo->frags[0];
+       }
+
+       /* Need one descriptor per segment; otherwise drop the packet */
+       if (fbnic_desc_unused(ring) < nsegs)
+               return -FBNIC_XDP_CONSUME;
+
+       /* Head buffer is a page_pool page; its DMA mapping is held by the
+        * pool, so only a sync (below) is needed, not a fresh mapping.
+        */
+       page = virt_to_page(pkt->buff.data_hard_start);
+       offset = offset_in_page(pkt->buff.data);
+       dma = page_pool_get_dma_addr(page);
+
+       size = pkt->buff.data_end - pkt->buff.data;
+
+       while (nsegs--) {
+               /* Make CPU writes (e.g. XDP program edits) visible to the
+                * device before the descriptor is published.
+                */
+               dma_sync_single_range_for_device(nv->dev, dma, offset, size,
+                                                DMA_BIDIRECTIONAL);
+               /* Descriptor wants the absolute segment address */
+               dma += offset;
+
+               /* Stash the page so Tx completion can release it later
+                * (NOTE(review): cleanup path not visible in this hunk).
+                */
+               ring->tx_buf[tail] = page;
+
+               twd = &ring->desc[tail];
+               *twd = cpu_to_le64(FIELD_PREP(FBNIC_TWD_ADDR_MASK, dma) |
+                                  FIELD_PREP(FBNIC_TWD_LEN_MASK, size) |
+                                  FIELD_PREP(FBNIC_TWD_TYPE_MASK,
+                                             FBNIC_TWD_TYPE_AL));
+
+               tail++;
+               tail &= ring->size_mask;
+
+               /* All frag bytes consumed: the descriptor just written was
+                * the last segment, exit before dereferencing frag.
+                */
+               if (!data_len)
+                       break;
+
+               /* Set up the next frag segment (also page_pool backed) */
+               offset = skb_frag_off(frag);
+               page = skb_frag_page(frag);
+               dma = page_pool_get_dma_addr(page);
+
+               size = skb_frag_size(frag);
+               data_len -= size;
+               frag++;
+       }
+
+       /* twd still points at the final descriptor written above */
+       *twd |= FBNIC_TWD_TYPE(LAST_AL);
+
+       ring->tail = tail;
+
+       return -FBNIC_XDP_TX;
+}
+
+/* Publish descriptors queued by fbnic_pkt_tx() by writing the XDP Tx
+ * ring's doorbell.  Called at most once per fbnic_clean_rcq() pass so
+ * the MMIO write is batched across all XDP_TX packets in the poll.
+ */
+static void fbnic_pkt_commit_tail(struct fbnic_napi_vector *nv,
+                                 unsigned int pkt_tail)
+{
+       struct fbnic_ring *ring = &nv->qt[0].sub1;
+
+       /* Force DMA writes to flush before writing to tail */
+       dma_wmb();
+
+       writel(pkt_tail, ring->doorbell);
+}
+
 static struct sk_buff *fbnic_run_xdp(struct fbnic_napi_vector *nv,
                                     struct fbnic_pkt_buff *pkt)
 {
@@ -1040,6 +1115,8 @@ static struct sk_buff *fbnic_run_xdp(struct fbnic_napi_vector *nv,
        case XDP_PASS:
 xdp_pass:
                return fbnic_build_skb(nv, pkt);
+       case XDP_TX:
+               return ERR_PTR(fbnic_pkt_tx(nv, pkt));
        default:
                bpf_warn_invalid_xdp_action(nv->napi.dev, xdp_prog, act);
                fallthrough;
@@ -1104,10 +1181,10 @@ static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
                           struct fbnic_q_triad *qt, int budget)
 {
        unsigned int packets = 0, bytes = 0, dropped = 0, alloc_failed = 0;
+       s32 head0 = -1, head1 = -1, pkt_tail = -1;
        u64 csum_complete = 0, csum_none = 0;
        struct fbnic_ring *rcq = &qt->cmpl;
        struct fbnic_pkt_buff *pkt;
-       s32 head0 = -1, head1 = -1;
        __le64 *raw_rcd, done;
        u32 head = rcq->head;
 
@@ -1163,6 +1240,9 @@ static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
                                bytes += skb->len;
 
                                napi_gro_receive(&nv->napi, skb);
+                       } else if (skb == ERR_PTR(-FBNIC_XDP_TX)) {
+                               pkt_tail = nv->qt[0].sub1.tail;
+                               bytes += xdp_get_buff_len(&pkt->buff);
                        } else {
                                if (!skb) {
                                        alloc_failed++;
@@ -1198,6 +1278,9 @@ static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
        rcq->stats.rx.csum_none += csum_none;
        u64_stats_update_end(&rcq->stats.syncp);
 
+       if (pkt_tail >= 0)
+               fbnic_pkt_commit_tail(nv, pkt_tail);
+
        /* Unmap and free processed buffers */
        if (head0 >= 0)
                fbnic_clean_bdq(nv, budget, &qt->sub0, head0);