libeth, idpf: use truesize as XDP RxQ info frag_size
author:    Larysa Zaremba <larysa.zaremba@intel.com>
           Thu, 5 Mar 2026 11:12:48 +0000 (12:12 +0100)
committer: Jakub Kicinski <kuba@kernel.org>
           Thu, 5 Mar 2026 16:02:05 +0000 (08:02 -0800)
The only user of the frag_size field in XDP RxQ info is
bpf_xdp_frags_increase_tail(), which clearly expects the whole buffer
size, not the DMA write size. The idpf driver configuration assumes
otherwise, which leads to negative tailroom.
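
For reference, the core helper derives the available tailroom straight
from the registered frag_size. Below is a paraphrased, abridged sketch
of bpf_xdp_frags_increase_tail() (modeled on net/core/filter.c; the
exact in-tree code may differ slightly):

	static int bpf_xdp_frags_increase_tail(struct xdp_buff *xdp, int offset)
	{
		struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
		skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags - 1];
		struct xdp_rxq_info *rxq = xdp->rxq;
		int tailroom;

		/* frag_size == 0 means "not set" and disables tail growth */
		if (!rxq->frag_size || rxq->frag_size > xdp->frame_sz)
			return -EOPNOTSUPP;

		/* Tailroom only makes sense if frag_size is the whole buffer
		 * size; with the DMA write size of a smaller bufq registered
		 * instead, this can go negative and growing always fails.
		 */
		tailroom = rxq->frag_size - skb_frag_size(frag);
		if (unlikely(offset > tailroom))
			return -ENOMEM;

		memset(skb_frag_address(frag) + skb_frag_size(frag), 0, offset);
		skb_frag_size_add(frag, offset);
		sinfo->xdp_frags_size += offset;

		return 0;
	}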

To make matters worse, buffer sizes are not actually uniform in idpf
when splitq is enabled, as there are several buffer queues per RxQ, so
rxq->rx_buf_size is meaningless in this case.
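
To illustrate with hypothetical sizes (the actual bufq lengths are
configuration-dependent):

	/* Say bufq0 provides 4096-byte buffers and bufq1 2048-byte ones,
	 * and rxq->rx_buf_size == 2048 gets registered as frag_size.
	 * Then, for any frag that HW filled from bufq0,
	 *
	 *	tailroom = rxq->frag_size - skb_frag_size(frag);
	 *
	 * evaluates to 2048 - 4096 < 0: negative tailroom.
	 */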

Use the truesize of the first bufq in AF_XDP ZC, as there is only one
bufq in that case. Disable growing the tail for regular splitq.

Fixes: ac8a861f632e ("idpf: prepare structures to support XDP")
Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
Signed-off-by: Larysa Zaremba <larysa.zaremba@intel.com>
Link: https://patch.msgid.link/20260305111253.2317394-8-larysa.zaremba@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/intel/idpf/xdp.c
drivers/net/ethernet/intel/idpf/xsk.c
drivers/net/ethernet/intel/libeth/xsk.c
include/net/libeth/xsk.h

diff --git a/drivers/net/ethernet/intel/idpf/xdp.c b/drivers/net/ethernet/intel/idpf/xdp.c
index 6ac9c6624c2ae47f1b22f24c4a634b11cd703125..cbccd454676816d213ae4d11ef965c470e025fe3 100644
--- a/drivers/net/ethernet/intel/idpf/xdp.c
+++ b/drivers/net/ethernet/intel/idpf/xdp.c
@@ -47,12 +47,16 @@ static int __idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq, void *arg)
 {
        const struct idpf_vport *vport = rxq->q_vector->vport;
        const struct idpf_q_vec_rsrc *rsrc;
+       u32 frag_size = 0;
        bool split;
        int err;
 
+       if (idpf_queue_has(XSK, rxq))
+               frag_size = rxq->bufq_sets[0].bufq.truesize;
+
        err = __xdp_rxq_info_reg(&rxq->xdp_rxq, vport->netdev, rxq->idx,
                                 rxq->q_vector->napi.napi_id,
-                                rxq->rx_buf_size);
+                                frag_size);
        if (err)
                return err;
 
diff --git a/drivers/net/ethernet/intel/idpf/xsk.c b/drivers/net/ethernet/intel/idpf/xsk.c
index 676cbd80774d10d234962f3bf29f7e1ce1571d88..d95d3efdfd36fe2b3e96e09bc4368160e88855f5 100644
--- a/drivers/net/ethernet/intel/idpf/xsk.c
+++ b/drivers/net/ethernet/intel/idpf/xsk.c
@@ -403,6 +403,7 @@ int idpf_xskfq_init(struct idpf_buf_queue *bufq)
        bufq->pending = fq.pending;
        bufq->thresh = fq.thresh;
        bufq->rx_buf_size = fq.buf_len;
+       bufq->truesize = fq.truesize;
 
        if (!idpf_xskfq_refill(bufq))
                netdev_err(bufq->pool->netdev,
diff --git a/drivers/net/ethernet/intel/libeth/xsk.c b/drivers/net/ethernet/intel/libeth/xsk.c
index 846e902e31b600b7bd85f340c376a7264a010722..4882951d5c9c43e411c691d08b5564b253ec32cd 100644
--- a/drivers/net/ethernet/intel/libeth/xsk.c
+++ b/drivers/net/ethernet/intel/libeth/xsk.c
@@ -167,6 +167,7 @@ int libeth_xskfq_create(struct libeth_xskfq *fq)
        fq->pending = fq->count;
        fq->thresh = libeth_xdp_queue_threshold(fq->count);
        fq->buf_len = xsk_pool_get_rx_frame_size(fq->pool);
+       fq->truesize = xsk_pool_get_rx_frag_step(fq->pool);
 
        return 0;
 }
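
The two lengths filled in here are distinct; a brief hedged summary
(terminology follows the kernel-doc added in the header hunk below):

	/* buf_len:  HW-writeable bytes per buffer (Rx frame size)
	 * truesize: step between consecutive buffers in the pool, i.e.
	 *           the whole per-buffer size
	 *
	 * bpf_xdp_frags_increase_tail() measures tailroom against the
	 * registered frag_size, so truesize, not buf_len, is the value
	 * XDP RxQ info needs.
	 */
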
diff --git a/include/net/libeth/xsk.h b/include/net/libeth/xsk.h
index 481a7b28e6f248845fd892b62ec1880652c7d87a..82b5d21aae878486def8d48abcfdf11ea42ef23a 100644
--- a/include/net/libeth/xsk.h
+++ b/include/net/libeth/xsk.h
@@ -597,6 +597,7 @@ __libeth_xsk_run_pass(struct libeth_xdp_buff *xdp,
  * @pending: current number of XSkFQEs to refill
  * @thresh: threshold below which the queue is refilled
  * @buf_len: HW-writeable length per each buffer
+ * @truesize: step between consecutive buffers, 0 if none exists
  * @nid: ID of the closest NUMA node with memory
  */
 struct libeth_xskfq {
@@ -614,6 +615,8 @@ struct libeth_xskfq {
        u32                     thresh;
 
        u32                     buf_len;
+       u32                     truesize;
+
        int                     nid;
 };